/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY		0

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[];
}; /* 128 bytes + ring size */

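/*
 * When incompat_features is 0, userspace may read completions straight from
 * this ring (the io_setup() context id is the ring's mmap address, see
 * ctx->user_id below). A minimal sketch, assuming a userspace mirror of the
 * layout above and glossing over the read barrier a real consumer needs
 * between loading tail and reading events; names are illustrative, not a
 * libaio API:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	unsigned head = ring->head;
 *
 *	while (head != ring->tail) {
 *		consume(&ring->io_events[head]);	// hypothetical helper
 *		head = (head + 1) % ring->nr;
 *	}
 *	ring->head = head;	// hand slots back; see the "Written to by
 *				// userland" note on ->head above
 */
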
/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD	2

#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[] __counted_by(nr);
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct kioctx_cpu __percpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct fsync_iocb {
	struct file		*file;
	struct work_struct	work;
	bool			datasync;
	struct cred		*creds;
};

struct poll_iocb {
	struct file		*file;
	struct wait_queue_head	*head;
	__poll_t		events;
	bool			cancelled;
	bool			work_scheduled;
	bool			work_need_resched;
	struct wait_queue_entry	wait;
	struct work_struct	work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct aio_kiocb {
	union {
		struct file		*ki_filp;
		struct kiocb		rw;
		struct fsync_iocb	fsync;
		struct poll_iocb	poll;
	};

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct io_event		ki_res;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	refcount_t		ki_refcnt;

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;		/* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
static struct ctl_table aio_sysctls[] = {
	{
		.procname	= "aio-nr",
		.data		= &aio_nr,
		.maxlen		= sizeof(aio_nr),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "aio-max-nr",
		.data		= &aio_max_nr,
		.maxlen		= sizeof(aio_max_nr),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};

static void __init aio_sysctl_init(void)
{
	register_sysctl_init("fs", aio_sysctls);
}
#else
#define aio_sysctl_init() do { } while (0)
#endif

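/*
 * A minimal userspace sketch of how this quota shows up, assuming raw
 * syscalls (no libaio): io_setup() fails with EAGAIN once the requested
 * events would push aio-nr past aio-max-nr (default 0x10000, see above):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/aio_abi.h>
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 65536, &ctx) < 0)
 *		perror("io_setup");	// EAGAIN if the quota is exhausted
 *
 * Current usage and the limit are visible as /proc/sys/fs/aio-nr
 * (read-only, mode 0444) and /proc/sys/fs/aio-max-nr (mode 0644).
 */
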
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->i_private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				O_RDWR, &aio_ring_fops);
	if (IS_ERR(file))
		iput(inode);
	return file;
}

static int aio_init_fs_context(struct fs_context *fc)
{
	if (!init_pseudo(fc, AIO_RING_MAGIC))
		return -ENOMEM;
	fc->s_iflags |= SB_I_NOEXEC;
	return 0;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.init_fs_context = aio_init_fs_context,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	aio_sysctl_init();
	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->i_private_lock);
		i_mapping->i_private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->i_private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
				page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	if (!table)
		goto out_unlock;

	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

out_unlock:
	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_DONTEXPAND);
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
			struct folio *src, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	rc = 0;

	/* mapping->i_private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->i_private_lock);
	ctx = mapping->i_private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* Take the ring_lock mutex.  This prevents aio_read_events() from
	 * writing to the ring's head, and prevents page migration from
	 * mucking with a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = src->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old folio hasn't already been changed */
		if (ctx->ring_pages[idx] != &src->page)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(folio_test_writeback(src));
	folio_get(dst);

	rc = folio_migrate_mapping(mapping, dst, src, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		folio_put(dst);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old folio is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	folio_migrate_copy(dst, src);
	BUG_ON(ctx->ring_pages[idx] != &src->page);
	ctx->ring_pages[idx] = &dst->page;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old folio is no longer accessible. */
	folio_put(src);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->i_private_lock);
	return rc;
}
#else
#define aio_migrate_folio NULL
#endif

static const struct address_space_operations aio_ctx_aops = {
	.dirty_folio	= noop_dirty_folio,
	.migrate_folio	= aio_migrate_folio,
};

static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_USER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (mmap_write_lock_killable(mm)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
				 PROT_READ | PROT_WRITE,
				 MAP_SHARED, 0, 0, &unused, NULL);
	mmap_write_unlock(mm);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = page_address(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

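/*
 * A worked example of the indexing these macros set up, assuming 4 KiB
 * pages, the 32-byte struct io_event, and a 32-byte struct aio_ring header:
 * AIO_EVENTS_PER_PAGE = 128, AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127,
 * and AIO_EVENTS_OFFSET = 1. Adding AIO_EVENTS_OFFSET to an event index
 * makes pos / AIO_EVENTS_PER_PAGE and pos % AIO_EVENTS_PER_PAGE pick the
 * right ring page and slot: event 0 becomes page 0, slot 1 (right after the
 * header) and event 127 becomes page 1, slot 0.
 */
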
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req;
	struct kioctx *ctx;
	unsigned long flags;

	/*
	 * The kiocb didn't come from aio, or is neither a read nor a write;
	 * ignore it.
	 */
	if (!(iocb->ki_flags & IOCB_AIO_RW))
		return;

	req = container_of(iocb, struct aio_kiocb, rw);

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
		return;

	ctx = req->ki_ctx;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

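/*
 * A minimal sketch of how a driver's ->read_iter()/->write_iter() might use
 * this hook. my_cancel(), my_request and my_try_abort() are hypothetical;
 * the only contract assumed is the kiocb_cancel_fn signature and calling
 * kiocb_set_cancel_fn() on a kiocb that was submitted through aio:
 *
 *	static int my_cancel(struct kiocb *iocb)
 *	{
 *		struct my_request *rq = iocb->private;	// driver state
 *
 *		return my_try_abort(rq);	// best effort; completion
 *	}					// still runs normally
 *
 *	// inside ->read_iter(), before going asynchronous:
 *	kiocb_set_cancel_fn(iocb, my_cancel);
 */
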
/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
					  free_rwork);
	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = page_address(ctx->ring_pages[0]);
					ring->id = ctx->id;
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to
	 * prevent overflow (reqs_available), and we also use percpu counters
	 * for this.
	 *
	 * Since up to half the slots might be on other cpus' percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

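/*
 * A worked example of the sizing above, assuming io_setup(128, &ctx), 4
 * possible CPUs, 4 KiB pages, the 32-byte struct io_event and a 32-byte
 * struct aio_ring: max_reqs = 128 is what counts against aio-max-nr;
 * nr_events becomes max(128, 4 * 4) * 2 = 256. aio_setup_ring() then adds
 * the two spare slots and rounds up to 3 pages, ending with
 * ctx->nr_events = (3 * 4096 - 32) / 32 = 383 ring slots, of which
 * nr_events - 1 = 382 are usable, and req_batch = 382 / (4 * 4) = 23.
 */
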
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as an indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool __get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;
		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
					     &avail, avail - ctx->req_batch));

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}

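/*
 * Continuing the worked example from ioctx_alloc() (ctx->nr_events = 383,
 * req_batch = 23 on 4 CPUs): a submitter whose local kioctx_cpu counter is
 * empty pulls 23 slots from ctx->reqs_available with one cmpxchg and then
 * allocates locally; completions accumulate in the local counter and are
 * flushed back to the global one 23 at a time once it reaches 2 * 23. The
 * global atomic is therefore touched roughly once per 23 requests rather
 * than once per request.
 */
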
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
                                  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of
 *	space in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay: whether we read the old version or
		 * the new one, either will be valid.  The important part is
		 * that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = page_address(ctx->ring_pages[0]);
		head = ring->head;

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	if (__get_reqs_available(ctx))
		return true;
	user_refill_reqs_available(ctx);
	return __get_reqs_available(ctx);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 *
 *	The refcount is initialized to 2 - one for the async op completion,
 *	one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	if (unlikely(!get_reqs_available(ctx))) {
		kmem_cache_free(kiocb_cachep, req);
		return NULL;
	}

	percpu_ref_get(&ctx->reqs);
	req->ki_ctx = ctx;
	INIT_LIST_HEAD(&req->ki_list);
	refcount_set(&req->ki_refcnt, 2);
	req->ki_eventfd = NULL;
	return req;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))
			ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

static inline void iocb_destroy(struct aio_kiocb *iocb)
{
	if (iocb->ki_eventfd)
		eventfd_ctx_put(iocb->ki_eventfd);
	if (iocb->ki_filp)
		fput(iocb->ki_filp);
	percpu_ref_put(&iocb->ki_ctx->reqs);
	kmem_cache_free(kiocb_cachep, iocb);
}

struct aio_waiter {
	struct wait_queue_entry	w;
	size_t			min_nr;
};

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head, avail;
	unsigned long	flags;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	*event = iocb->ki_res;

	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
		 (void __user *)(unsigned long)iocb->ki_res.obj,
		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);

	avail = tail > head
		? tail - head
		: tail + ctx->nr_events - head;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd)
		eventfd_signal(iocb->ki_eventfd);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait)) {
		struct aio_waiter *curr, *next;
		unsigned long flags;

		spin_lock_irqsave(&ctx->wait.lock, flags);
		list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
			if (avail >= curr->min_nr) {
				list_del_init_careful(&curr->w.entry);
				wake_up_process(curr->w.private);
			}
		spin_unlock_irqrestore(&ctx->wait.lock, flags);
	}
}

static inline void iocb_put(struct aio_kiocb *iocb)
{
	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
		aio_complete(iocb);
		iocb_destroy(iocb);
	}
}

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		ev = page_address(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = page_address(ctx->ring_pages[0]);
	ring->head = head;
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			ktime_t until)
{
	struct hrtimer_sleeper	t;
	struct aio_waiter	w;
	long ret = 0, ret2 = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	aio_read_events(ctx, min_nr, nr, event, &ret);
	if (until == 0 || ret < 0 || ret >= min_nr)
		return ret;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	if (until != KTIME_MAX) {
		hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
	}

	init_wait(&w.w);

	while (1) {
		unsigned long nr_got = ret;

		w.min_nr = min_nr - ret;

		ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
		if (!ret2 && !t.task)
			ret2 = -ETIME;

		if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)
			break;

		if (nr_got == ret)
			schedule();
	}

	finish_wait(&ctx->wait, &w.w);
	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	return ret;
}

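/*
 * A minimal userspace sketch of the semantics read_events() implements,
 * assuming raw syscalls: io_getevents() returns once at least min_nr
 * events have arrived, the timeout expires, or a signal interrupts the
 * wait:
 *
 *	struct io_event events[64];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int n = syscall(__NR_io_getevents, ctx, 1, 64, events, &ts);
 *	// n >= 1: events[0..n-1] hold completions (data, obj, res, res2)
 *	// n == 0: the one second timeout expired with nothing ready
 *	// n < 0: error, e.g. -EINVAL for a dead or bogus context
 */
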
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized or
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctx32p);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#endif

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass wait to kill_ioctx() where it can be set in a
		 * thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() calls run
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context is done. Otherwise the
		 * kernel keeps using user-space buffers even though the user
		 * thinks the context has been destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}

static void aio_remove_iocb(struct aio_kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_del(&iocb->ki_list);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}

static void aio_complete_rw(struct kiocb *kiocb, long res)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		if (S_ISREG(inode->i_mode))
			kiocb_end_write(kiocb);
	}

	iocb->ki_res.res = res;
	iocb->ki_res.res2 = 0;
	iocb_put(iocb);
}

static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
{
	int ret;

	req->ki_complete = aio_complete_rw;
	req->private = NULL;
	req->ki_pos = iocb->aio_offset;
	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
	if (iocb->aio_flags & IOCB_FLAG_RESFD)
		req->ki_flags |= IOCB_EVENTFD;
	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		/*
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		 */
		ret = ioprio_check_cap(iocb->aio_reqprio);
		if (ret) {
			pr_debug("aio ioprio check cap error: %d\n", ret);
			return ret;
		}

		req->ki_ioprio = iocb->aio_reqprio;
	} else
		req->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
	if (unlikely(ret))
		return ret;

	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
	return 0;
}

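/*
 * A minimal sketch of driving the IOCB_FLAG_IOPRIO branch above from
 * userspace, assuming the IOPRIO_PRIO_VALUE() encoding from
 * <linux/ioprio.h> (I/O class in the upper bits, class data in the lower):
 *
 *	cb.aio_flags   |= IOCB_FLAG_IOPRIO;
 *	cb.aio_reqprio  = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
 *
 * ioprio_check_cap() makes io_submit() fail outright for priorities the
 * task may not set (for example IOPRIO_CLASS_RT without the required
 * capability) instead of silently clamping them.
 */
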
static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
		struct iovec **iovec, bool vectored, bool compat,
		struct iov_iter *iter)
{
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

	if (!vectored) {
		ssize_t ret = import_ubuf(rw, buf, len, iter);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
}

static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIOs
		 * may already be running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		req->ki_complete(req, ret);
	}
}

static int aio_read(struct kiocb *req, const struct iocb *iocb,
			bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb);
	if (ret)
		return ret;
	file = req->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;
	if (unlikely(!file->f_op->read_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret)
		aio_rw_done(req, call_read_iter(file, req, &iter));
	kfree(iovec);
	return ret;
}

static int aio_write(struct kiocb *req, const struct iocb *iocb,
			 bool vectored, bool compat)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	struct file *file;
	int ret;

	ret = aio_prep_rw(req, iocb);
	if (ret)
		return ret;
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (unlikely(!file->f_op->write_iter))
		return -EINVAL;

	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
	if (ret < 0)
		return ret;
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		if (S_ISREG(file_inode(file)->i_mode))
			kiocb_start_write(req);
		req->ki_flags |= IOCB_WRITE;
		aio_rw_done(req, call_write_iter(file, req, &iter));
	}
	kfree(iovec);
	return ret;
}

static void aio_fsync_work(struct work_struct *work)
{
	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
	const struct cred *old_cred = override_creds(iocb->fsync.creds);

	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
	revert_creds(old_cred);
	put_cred(iocb->fsync.creds);
	iocb_put(iocb);
}

static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
		     bool datasync)
{
	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
			iocb->aio_rw_flags))
		return -EINVAL;

	if (unlikely(!req->file->f_op->fsync))
		return -EINVAL;

	req->creds = prepare_creds();
	if (!req->creds)
		return -ENOMEM;

	req->datasync = datasync;
	INIT_WORK(&req->work, aio_fsync_work);
	schedule_work(&req->work);
	return 0;
}

static void aio_poll_put_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);

	iocb_put(iocb);
}

/*
 * Safely lock the waitqueue which the request is on, synchronizing with the
 * case where the ->poll() provider decides to free its waitqueue early.
 *
 * Returns true on success, meaning that req->head->lock was locked, req->wait
 * is on req->head, and an RCU read lock was taken.  Returns false if the
 * request was already removed from its waitqueue (which might no longer exist).
 */
static bool poll_iocb_lock_wq(struct poll_iocb *req)
{
	wait_queue_head_t *head;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us, then check whether the request is still on the queue.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	head = smp_load_acquire(&req->head);
	if (head) {
		spin_lock(&head->lock);
		if (!list_empty(&req->wait.entry))
			return true;
		spin_unlock(&head->lock);
	}
	rcu_read_unlock();
	return false;
}

static void poll_iocb_unlock_wq(struct poll_iocb *req)
{
	spin_unlock(&req->head->lock);
	rcu_read_unlock();
}

static void aio_poll_complete_work(struct work_struct *work)
{
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct poll_table_struct pt = { ._key = req->events };
	struct kioctx *ctx = iocb->ki_ctx;
	__poll_t mask = 0;

	if (!READ_ONCE(req->cancelled))
		mask = vfs_poll(req->file, &pt) & req->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
	 * synchronize with them.  In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	if (poll_iocb_lock_wq(req)) {
		if (!mask && !READ_ONCE(req->cancelled)) {
			/*
			 * The request isn't actually ready to be completed yet.
			 * Reschedule completion if another wakeup came in.
			 */
			if (req->work_need_resched) {
				schedule_work(&req->work);
				req->work_need_resched = false;
			} else {
				req->work_scheduled = false;
			}
			poll_iocb_unlock_wq(req);
			spin_unlock_irq(&ctx->ctx_lock);
			return;
		}
		list_del_init(&req->wait.entry);
		poll_iocb_unlock_wq(req);
	} /* else, POLLFREE has freed the waitqueue, so we must complete */
	list_del_init(&iocb->ki_list);
	iocb->ki_res.res = mangle_poll(mask);
	spin_unlock_irq(&ctx->ctx_lock);

	iocb_put(iocb);
}

/* assumes we are called with irqs disabled */
static int aio_poll_cancel(struct kiocb *iocb)
{
	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
	struct poll_iocb *req = &aiocb->poll;

	if (poll_iocb_lock_wq(req)) {
		WRITE_ONCE(req->cancelled, true);
		if (!req->work_scheduled) {
			schedule_work(&aiocb->poll.work);
			req->work_scheduled = true;
		}
		poll_iocb_unlock_wq(req);
	} /* else, the request was force-cancelled by POLLFREE already */

	return 0;
}

static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
		void *key)
{
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))
		return 0;

	/*
	 * Complete the request inline if possible.  This requires that three
	 * conditions be met:
	 *   1. An event mask must have been passed.  If a plain wakeup was done
	 *	instead, then mask == 0 and we have to call vfs_poll() to get
	 *	the events, so inline completion isn't possible.
	 *   2. The completion work must not have already been scheduled.
	 *   3. ctx_lock must not be busy.  We have to use trylock because we
	 *	already hold the waitqueue lock, so this inverts the normal
	 *	locking order.  Use irqsave/irqrestore because not all
	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
	 *	yet IRQs have to be disabled before ctx_lock is obtained.
	 */
	if (mask && !req->work_scheduled &&
	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
		struct kioctx *ctx = iocb->ki_ctx;

		list_del_init(&req->wait.entry);
		list_del(&iocb->ki_list);
		iocb->ki_res.res = mangle_poll(mask);
		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
			iocb = NULL;
			INIT_WORK(&req->work, aio_poll_put_work);
			schedule_work(&req->work);
		}
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
		if (iocb)
			iocb_put(iocb);
	} else {
		/*
		 * Schedule the completion work if needed.  If it was already
		 * scheduled, record that another wakeup came in.
		 *
		 * Don't remove the request from the waitqueue here, as it might
		 * not actually be complete yet (we won't know until vfs_poll()
		 * is called), and we must not miss any wakeups.  POLLFREE is an
		 * exception to this; see below.
		 */
		if (req->work_scheduled) {
			req->work_need_resched = true;
		} else {
			schedule_work(&req->work);
			req->work_scheduled = true;
		}

		/*
		 * If the waitqueue is being freed early but we can't complete
		 * the request inline, we have to tear down the request as best
		 * we can.  That means immediately removing the request from its
		 * waitqueue and preventing all further accesses to the
		 * waitqueue via the request.  We also need to schedule the
		 * completion work (done above).  Also mark the request as
		 * cancelled, to potentially skip an unneeded call to ->poll().
		 */
		if (mask & POLLFREE) {
			WRITE_ONCE(req->cancelled, true);
			list_del_init(&req->wait.entry);

			/*
			 * Careful: this *must* be the last step, since as soon
			 * as req->head is NULL'ed out, the request can be
			 * completed and freed, since aio_poll_complete_work()
			 * will no longer need to take the waitqueue lock.
			 */
			smp_store_release(&req->head, NULL);
		}
	}
	return 1;
}

struct aio_poll_table {
	struct poll_table_struct	pt;
	struct aio_kiocb		*iocb;
	bool				queued;
	int				error;
};

static void
aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
		struct poll_table_struct *p)
{
	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);

	/* multiple wait queues per file are not supported */
	if (unlikely(pt->queued)) {
		pt->error = -EINVAL;
		return;
	}

	pt->queued = true;
	pt->error = 0;
	pt->iocb->poll.head = head;
	add_wait_queue(head, &pt->iocb->poll.wait);
}

1899static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1900{
1901 struct kioctx *ctx = aiocb->ki_ctx;
1902 struct poll_iocb *req = &aiocb->poll;
1903 struct aio_poll_table apt;
1904 bool cancel = false;
1905 __poll_t mask;
1906
1907 /* reject any unknown events outside the normal event mask. */
1908 if ((u16)iocb->aio_buf != iocb->aio_buf)
1909 return -EINVAL;
1910 /* reject fields that are not defined for poll */
1911 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1912 return -EINVAL;
1913
1914 INIT_WORK(&req->work, aio_poll_complete_work);
1915 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1916
1917 req->head = NULL;
1918 req->cancelled = false;
1919 req->work_scheduled = false;
1920 req->work_need_resched = false;
1921
1922 apt.pt._qproc = aio_poll_queue_proc;
1923 apt.pt._key = req->events;
1924 apt.iocb = aiocb;
1925 apt.queued = false;
1926 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1927
1928 /* initialized the list so that we can do list_empty checks */
1929 INIT_LIST_HEAD(&req->wait.entry);
1930 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1931
1932 mask = vfs_poll(req->file, &apt.pt) & req->events;
1933 spin_lock_irq(&ctx->ctx_lock);
1934 if (likely(apt.queued)) {
1935 bool on_queue = poll_iocb_lock_wq(req);
1936
1937 if (!on_queue || req->work_scheduled) {
1938 /*
1939 * aio_poll_wake() already either scheduled the async
1940 * completion work, or completed the request inline.
1941 */
1942 if (apt.error) /* unsupported case: multiple queues */
1943 cancel = true;
1944 apt.error = 0;
1945 mask = 0;
1946 }
1947 if (mask || apt.error) {
1948 /* Steal to complete synchronously. */
1949 list_del_init(&req->wait.entry);
1950 } else if (cancel) {
1951 /* Cancel if possible (may be too late though). */
1952 WRITE_ONCE(req->cancelled, true);
1953 } else if (on_queue) {
1954 /*
1955 * Actually waiting for an event, so add the request to
1956 * active_reqs so that it can be cancelled if needed.
1957 */
1958 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1959 aiocb->ki_cancel = aio_poll_cancel;
1960 }
1961 if (on_queue)
1962 poll_iocb_unlock_wq(req);
1963 }
1964 if (mask) { /* no async, we'd stolen it */
1965 aiocb->ki_res.res = mangle_poll(mask);
1966 apt.error = 0;
1967 }
1968 spin_unlock_irq(&ctx->ctx_lock);
1969 if (mask)
1970 iocb_put(aiocb);
1971 return apt.error;
1972}
1973
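/*
 * Example (illustrative userspace sketch, not part of this file): submitting
 * an IOCB_CMD_POLL request with the raw syscall.  The requested event mask
 * goes in aio_buf and must fit in 16 bits (see the check in aio_poll()
 * above); EPOLLERR and EPOLLHUP are always watched for.  "ctx" and "sockfd"
 * are assumed to be set up as for any other io_submit() call.
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *
 *	cb.aio_fildes = sockfd;
 *	cb.aio_lio_opcode = IOCB_CMD_POLL;
 *	cb.aio_buf = POLLIN;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 *
 * The completion's io_event.res then carries the returned poll mask.
 */
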
static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
			   struct iocb __user *user_iocb, struct aio_kiocb *req,
			   bool compat)
{
	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp))
		return -EBADF;

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		struct eventfd_ctx *eventfd;
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
		if (IS_ERR(eventfd))
			return PTR_ERR(eventfd);

		req->ki_eventfd = eventfd;
	}

	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
		pr_debug("EFAULT: aio_key\n");
		return -EFAULT;
	}

	req->ki_res.obj = (u64)(unsigned long)user_iocb;
	req->ki_res.data = iocb->aio_data;
	req->ki_res.res = 0;
	req->ki_res.res2 = 0;

	switch (iocb->aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		return aio_read(&req->rw, iocb, false, compat);
	case IOCB_CMD_PWRITE:
		return aio_write(&req->rw, iocb, false, compat);
	case IOCB_CMD_PREADV:
		return aio_read(&req->rw, iocb, true, compat);
	case IOCB_CMD_PWRITEV:
		return aio_write(&req->rw, iocb, true, compat);
	case IOCB_CMD_FSYNC:
		return aio_fsync(&req->fsync, iocb, false);
	case IOCB_CMD_FDSYNC:
		return aio_fsync(&req->fsync, iocb, true);
	case IOCB_CMD_POLL:
		return aio_poll(req, iocb);
	default:
		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
		return -EINVAL;
	}
}

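/*
 * Example (illustrative userspace sketch, not part of this file): using
 * IOCB_FLAG_RESFD, as described in the comment above, to receive completion
 * notifications through an eventfd - e.g. to integrate AIO completions into
 * an epoll loop.  "cb", "cbs" and "ctx" are assumed to be an otherwise
 * initialized iocb and context.
 *
 *	int efd = eventfd(0, 0);
 *	uint64_t n;
 *
 *	cb.aio_flags = IOCB_FLAG_RESFD;
 *	cb.aio_resfd = efd;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 *	...
 *	read(efd, &n, sizeof(n));
 *
 * After the read, n holds the number of completions signaled since the
 * eventfd was last drained.
 */
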
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 bool compat)
{
	struct aio_kiocb *req;
	struct iocb iocb;
	int err;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
		return -EFAULT;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb.aio_reserved2)) {
		pr_debug("EINVAL: reserved field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
	    ((ssize_t)iocb.aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);

	/* Done with the synchronous reference */
	iocb_put(req);

	/*
	 * If err is 0, we've either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously.  Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
	if (unlikely(err)) {
		iocb_destroy(req);
		put_reqs_available(ctx, 1);
	}
	return err;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, false);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

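/*
 * Example (illustrative userspace sketch, not part of this file): the
 * minimal io_setup()/io_submit() sequence for one asynchronous read, using
 * the raw syscalls and the ABI types from <linux/aio_abi.h>.  Error
 * handling is elided; "fd" and "buf" are assumed to exist.
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *
 *	syscall(SYS_io_setup, 128, &ctx);
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_offset = 0;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 */
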
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
		if (ret)
			break;
	}
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
#endif

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the request is found on the context's list of active requests,
 *	cancellation is attempted and -EINPROGRESS is returned; the
 *	completion event is still delivered through the ring buffer rather
 *	than being copied to result, which is no longer used.  May fail
 *	with -EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if the aio_context specified by ctx_id is
 *	invalid or the iocb was not found.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
			break;
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}

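/*
 * Example (illustrative userspace sketch, not part of this file): cancelling
 * a previously submitted iocb.  As implemented above, a request found on the
 * active list yields -EINPROGRESS and its io_event is still delivered
 * through the ring buffer; the result argument is ignored.
 *
 *	struct io_event unused;
 *	int ret;
 *
 *	ret = syscall(SYS_io_cancel, ctx, &cb, &unused);
 *
 * With ret < 0 and errno == EINPROGRESS, the cancelled request's completion
 * is then reaped via io_getevents() like any other.
 */
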
static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, or if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
#ifdef CONFIG_64BIT

SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

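/*
 * Example (illustrative userspace sketch, not part of this file): reaping a
 * single completion with a one second relative timeout.  On 64-bit, glibc's
 * struct timespec typically has the same layout as struct __kernel_timespec;
 * a NULL timeout blocks indefinitely.
 *
 *	struct io_event ev;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	int n;
 *
 *	n = syscall(SYS_io_getevents, ctx, 1, 1, &ev, &ts);
 *
 * On success n == 1 and ev.res holds the transferred byte count (or a
 * negative errno value).
 */
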
struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t			sigsetsize;
};

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

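/*
 * Example (illustrative userspace sketch, not part of this file): atomically
 * swapping the signal mask for the duration of the wait, pselect()-style,
 * where SYS_io_pgetevents is available.  Note that sigsetsize must be the
 * kernel's sigset size (_NSIG / 8, i.e. 8 on most architectures), not
 * glibc's sizeof(sigset_t).
 *
 *	sigset_t mask;
 *	struct {
 *		const sigset_t *sigmask;
 *		size_t sigsetsize;
 *	} usig = { &mask, 8 };
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);
 *	syscall(SYS_io_pgetevents, ctx, 1, 1, &ev, &ts, &usig);
 *
 * The previous mask is restored before the call returns, except that a
 * pending signal is delivered while the temporary mask is in place.
 */
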
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;
};

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif
1/*
2 * An async IO implementation for Linux
3 * Written by Benjamin LaHaise <bcrl@kvack.org>
4 *
5 * Implements an efficient asynchronous io interface.
6 *
7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
8 *
9 * See ../COPYING for licensing terms.
10 */
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/errno.h>
14#include <linux/time.h>
15#include <linux/aio_abi.h>
16#include <linux/export.h>
17#include <linux/syscalls.h>
18#include <linux/backing-dev.h>
19#include <linux/uio.h>
20
21#define DEBUG 0
22
23#include <linux/sched.h>
24#include <linux/fs.h>
25#include <linux/file.h>
26#include <linux/mm.h>
27#include <linux/mman.h>
28#include <linux/mmu_context.h>
29#include <linux/slab.h>
30#include <linux/timer.h>
31#include <linux/aio.h>
32#include <linux/highmem.h>
33#include <linux/workqueue.h>
34#include <linux/security.h>
35#include <linux/eventfd.h>
36#include <linux/blkdev.h>
37#include <linux/compat.h>
38
39#include <asm/kmap_types.h>
40#include <asm/uaccess.h>
41
42#if DEBUG > 1
43#define dprintk printk
44#else
45#define dprintk(x...) do { ; } while (0)
46#endif
47
48/*------ sysctl variables----*/
49static DEFINE_SPINLOCK(aio_nr_lock);
50unsigned long aio_nr; /* current system wide number of aio requests */
51unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
52/*----end sysctl variables---*/
53
54static struct kmem_cache *kiocb_cachep;
55static struct kmem_cache *kioctx_cachep;
56
57static struct workqueue_struct *aio_wq;
58
59/* Used for rare fput completion. */
60static void aio_fput_routine(struct work_struct *);
61static DECLARE_WORK(fput_work, aio_fput_routine);
62
63static DEFINE_SPINLOCK(fput_lock);
64static LIST_HEAD(fput_head);
65
66static void aio_kick_handler(struct work_struct *);
67static void aio_queue_work(struct kioctx *);
68
69/* aio_setup
70 * Creates the slab caches used by the aio routines, panic on
71 * failure as this is done early during the boot sequence.
72 */
73static int __init aio_setup(void)
74{
75 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
76 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
77
78 aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
79 BUG_ON(!aio_wq);
80
81 pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
82
83 return 0;
84}
85__initcall(aio_setup);
86
87static void aio_free_ring(struct kioctx *ctx)
88{
89 struct aio_ring_info *info = &ctx->ring_info;
90 long i;
91
92 for (i=0; i<info->nr_pages; i++)
93 put_page(info->ring_pages[i]);
94
95 if (info->mmap_size) {
96 BUG_ON(ctx->mm != current->mm);
97 vm_munmap(info->mmap_base, info->mmap_size);
98 }
99
100 if (info->ring_pages && info->ring_pages != info->internal_pages)
101 kfree(info->ring_pages);
102 info->ring_pages = NULL;
103 info->nr = 0;
104}
105
106static int aio_setup_ring(struct kioctx *ctx)
107{
108 struct aio_ring *ring;
109 struct aio_ring_info *info = &ctx->ring_info;
110 unsigned nr_events = ctx->max_reqs;
111 unsigned long size;
112 int nr_pages;
113
114 /* Compensate for the ring buffer's head/tail overlap entry */
115 nr_events += 2; /* 1 is required, 2 for good luck */
116
117 size = sizeof(struct aio_ring);
118 size += sizeof(struct io_event) * nr_events;
119 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
120
121 if (nr_pages < 0)
122 return -EINVAL;
123
124 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
125
126 info->nr = 0;
127 info->ring_pages = info->internal_pages;
128 if (nr_pages > AIO_RING_PAGES) {
129 info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
130 if (!info->ring_pages)
131 return -ENOMEM;
132 }
133
134 info->mmap_size = nr_pages * PAGE_SIZE;
135 dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
136 down_write(&ctx->mm->mmap_sem);
137 info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
138 PROT_READ|PROT_WRITE,
139 MAP_ANONYMOUS|MAP_PRIVATE, 0);
140 if (IS_ERR((void *)info->mmap_base)) {
141 up_write(&ctx->mm->mmap_sem);
142 info->mmap_size = 0;
143 aio_free_ring(ctx);
144 return -EAGAIN;
145 }
146
147 dprintk("mmap address: 0x%08lx\n", info->mmap_base);
148 info->nr_pages = get_user_pages(current, ctx->mm,
149 info->mmap_base, nr_pages,
150 1, 0, info->ring_pages, NULL);
151 up_write(&ctx->mm->mmap_sem);
152
153 if (unlikely(info->nr_pages != nr_pages)) {
154 aio_free_ring(ctx);
155 return -EAGAIN;
156 }
157
158 ctx->user_id = info->mmap_base;
159
160 info->nr = nr_events; /* trusted copy */
161
162 ring = kmap_atomic(info->ring_pages[0]);
163 ring->nr = nr_events; /* user copy */
164 ring->id = ctx->user_id;
165 ring->head = ring->tail = 0;
166 ring->magic = AIO_RING_MAGIC;
167 ring->compat_features = AIO_RING_COMPAT_FEATURES;
168 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
169 ring->header_length = sizeof(struct aio_ring);
170 kunmap_atomic(ring);
171
172 return 0;
173}
174
175
176/* aio_ring_event: returns a pointer to the event at the given index from
177 * kmap_atomic(). Release the pointer with put_aio_ring_event();
178 */
179#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
180#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
181#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
182
183#define aio_ring_event(info, nr) ({ \
184 unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
185 struct io_event *__event; \
186 __event = kmap_atomic( \
187 (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
188 __event += pos % AIO_EVENTS_PER_PAGE; \
189 __event; \
190})
191
192#define put_aio_ring_event(event) do { \
193 struct io_event *__event = (event); \
194 (void)__event; \
195 kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
196} while(0)
197
198static void ctx_rcu_free(struct rcu_head *head)
199{
200 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
201 kmem_cache_free(kioctx_cachep, ctx);
202}
203
204/* __put_ioctx
205 * Called when the last user of an aio context has gone away,
206 * and the struct needs to be freed.
207 */
208static void __put_ioctx(struct kioctx *ctx)
209{
210 unsigned nr_events = ctx->max_reqs;
211 BUG_ON(ctx->reqs_active);
212
213 cancel_delayed_work_sync(&ctx->wq);
214 aio_free_ring(ctx);
215 mmdrop(ctx->mm);
216 ctx->mm = NULL;
217 if (nr_events) {
218 spin_lock(&aio_nr_lock);
219 BUG_ON(aio_nr - nr_events > aio_nr);
220 aio_nr -= nr_events;
221 spin_unlock(&aio_nr_lock);
222 }
223 pr_debug("__put_ioctx: freeing %p\n", ctx);
224 call_rcu(&ctx->rcu_head, ctx_rcu_free);
225}
226
227static inline int try_get_ioctx(struct kioctx *kioctx)
228{
229 return atomic_inc_not_zero(&kioctx->users);
230}
231
232static inline void put_ioctx(struct kioctx *kioctx)
233{
234 BUG_ON(atomic_read(&kioctx->users) <= 0);
235 if (unlikely(atomic_dec_and_test(&kioctx->users)))
236 __put_ioctx(kioctx);
237}
238
239/* ioctx_alloc
240 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
241 */
242static struct kioctx *ioctx_alloc(unsigned nr_events)
243{
244 struct mm_struct *mm;
245 struct kioctx *ctx;
246 int err = -ENOMEM;
247
248 /* Prevent overflows */
249 if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
250 (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
251 pr_debug("ENOMEM: nr_events too high\n");
252 return ERR_PTR(-EINVAL);
253 }
254
255 if (!nr_events || (unsigned long)nr_events > aio_max_nr)
256 return ERR_PTR(-EAGAIN);
257
258 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
259 if (!ctx)
260 return ERR_PTR(-ENOMEM);
261
262 ctx->max_reqs = nr_events;
263 mm = ctx->mm = current->mm;
264 atomic_inc(&mm->mm_count);
265
266 atomic_set(&ctx->users, 2);
267 spin_lock_init(&ctx->ctx_lock);
268 spin_lock_init(&ctx->ring_info.ring_lock);
269 init_waitqueue_head(&ctx->wait);
270
271 INIT_LIST_HEAD(&ctx->active_reqs);
272 INIT_LIST_HEAD(&ctx->run_list);
273 INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
274
275 if (aio_setup_ring(ctx) < 0)
276 goto out_freectx;
277
278 /* limit the number of system wide aios */
279 spin_lock(&aio_nr_lock);
280 if (aio_nr + nr_events > aio_max_nr ||
281 aio_nr + nr_events < aio_nr) {
282 spin_unlock(&aio_nr_lock);
283 goto out_cleanup;
284 }
285 aio_nr += ctx->max_reqs;
286 spin_unlock(&aio_nr_lock);
287
288 /* now link into global list. */
289 spin_lock(&mm->ioctx_lock);
290 hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
291 spin_unlock(&mm->ioctx_lock);
292
293 dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
294 ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
295 return ctx;
296
297out_cleanup:
298 err = -EAGAIN;
299 aio_free_ring(ctx);
300out_freectx:
301 mmdrop(mm);
302 kmem_cache_free(kioctx_cachep, ctx);
303 dprintk("aio: error allocating ioctx %d\n", err);
304 return ERR_PTR(err);
305}
306
307/* kill_ctx
308 * Cancels all outstanding aio requests on an aio context. Used
309 * when the processes owning a context have all exited to encourage
310 * the rapid destruction of the kioctx.
311 */
312static void kill_ctx(struct kioctx *ctx)
313{
314 int (*cancel)(struct kiocb *, struct io_event *);
315 struct task_struct *tsk = current;
316 DECLARE_WAITQUEUE(wait, tsk);
317 struct io_event res;
318
319 spin_lock_irq(&ctx->ctx_lock);
320 ctx->dead = 1;
321 while (!list_empty(&ctx->active_reqs)) {
322 struct list_head *pos = ctx->active_reqs.next;
323 struct kiocb *iocb = list_kiocb(pos);
324 list_del_init(&iocb->ki_list);
325 cancel = iocb->ki_cancel;
326 kiocbSetCancelled(iocb);
327 if (cancel) {
328 iocb->ki_users++;
329 spin_unlock_irq(&ctx->ctx_lock);
330 cancel(iocb, &res);
331 spin_lock_irq(&ctx->ctx_lock);
332 }
333 }
334
335 if (!ctx->reqs_active)
336 goto out;
337
338 add_wait_queue(&ctx->wait, &wait);
339 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
340 while (ctx->reqs_active) {
341 spin_unlock_irq(&ctx->ctx_lock);
342 io_schedule();
343 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
344 spin_lock_irq(&ctx->ctx_lock);
345 }
346 __set_task_state(tsk, TASK_RUNNING);
347 remove_wait_queue(&ctx->wait, &wait);
348
349out:
350 spin_unlock_irq(&ctx->ctx_lock);
351}
352
353/* wait_on_sync_kiocb:
354 * Waits on the given sync kiocb to complete.
355 */
356ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
357{
358 while (iocb->ki_users) {
359 set_current_state(TASK_UNINTERRUPTIBLE);
360 if (!iocb->ki_users)
361 break;
362 io_schedule();
363 }
364 __set_current_state(TASK_RUNNING);
365 return iocb->ki_user_data;
366}
367EXPORT_SYMBOL(wait_on_sync_kiocb);
368
369/* exit_aio: called when the last user of mm goes away. At this point,
370 * there is no way for any new requests to be submited or any of the
371 * io_* syscalls to be called on the context. However, there may be
372 * outstanding requests which hold references to the context; as they
373 * go away, they will call put_ioctx and release any pinned memory
374 * associated with the request (held via struct page * references).
375 */
376void exit_aio(struct mm_struct *mm)
377{
378 struct kioctx *ctx;
379
380 while (!hlist_empty(&mm->ioctx_list)) {
381 ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
382 hlist_del_rcu(&ctx->list);
383
384 kill_ctx(ctx);
385
386 if (1 != atomic_read(&ctx->users))
387 printk(KERN_DEBUG
388 "exit_aio:ioctx still alive: %d %d %d\n",
389 atomic_read(&ctx->users), ctx->dead,
390 ctx->reqs_active);
391 /*
392 * We don't need to bother with munmap() here -
393 * exit_mmap(mm) is coming and it'll unmap everything.
394 * Since aio_free_ring() uses non-zero ->mmap_size
395 * as indicator that it needs to unmap the area,
396 * just set it to 0; aio_free_ring() is the only
397 * place that uses ->mmap_size, so it's safe.
398 * That way we get all munmap done to current->mm -
399 * all other callers have ctx->mm == current->mm.
400 */
401 ctx->ring_info.mmap_size = 0;
402 put_ioctx(ctx);
403 }
404}
405
406/* aio_get_req
407 * Allocate a slot for an aio request. Increments the users count
408 * of the kioctx so that the kioctx stays around until all requests are
409 * complete. Returns NULL if no requests are free.
410 *
411 * Returns with kiocb->users set to 2. The io submit code path holds
412 * an extra reference while submitting the i/o.
413 * This prevents races between the aio code path referencing the
414 * req (after submitting it) and aio_complete() freeing the req.
415 */
416static struct kiocb *__aio_get_req(struct kioctx *ctx)
417{
418 struct kiocb *req = NULL;
419
420 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
421 if (unlikely(!req))
422 return NULL;
423
424 req->ki_flags = 0;
425 req->ki_users = 2;
426 req->ki_key = 0;
427 req->ki_ctx = ctx;
428 req->ki_cancel = NULL;
429 req->ki_retry = NULL;
430 req->ki_dtor = NULL;
431 req->private = NULL;
432 req->ki_iovec = NULL;
433 INIT_LIST_HEAD(&req->ki_run_list);
434 req->ki_eventfd = NULL;
435
436 return req;
437}
438
439/*
440 * struct kiocb's are allocated in batches to reduce the number of
441 * times the ctx lock is acquired and released.
442 */
443#define KIOCB_BATCH_SIZE 32L
444struct kiocb_batch {
445 struct list_head head;
446 long count; /* number of requests left to allocate */
447};
448
449static void kiocb_batch_init(struct kiocb_batch *batch, long total)
450{
451 INIT_LIST_HEAD(&batch->head);
452 batch->count = total;
453}
454
455static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
456{
457 struct kiocb *req, *n;
458
459 if (list_empty(&batch->head))
460 return;
461
462 spin_lock_irq(&ctx->ctx_lock);
463 list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
464 list_del(&req->ki_batch);
465 list_del(&req->ki_list);
466 kmem_cache_free(kiocb_cachep, req);
467 ctx->reqs_active--;
468 }
469 if (unlikely(!ctx->reqs_active && ctx->dead))
470 wake_up_all(&ctx->wait);
471 spin_unlock_irq(&ctx->ctx_lock);
472}
473
474/*
475 * Allocate a batch of kiocbs. This avoids taking and dropping the
476 * context lock a lot during setup.
477 */
478static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
479{
480 unsigned short allocated, to_alloc;
481 long avail;
482 bool called_fput = false;
483 struct kiocb *req, *n;
484 struct aio_ring *ring;
485
486 to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
487 for (allocated = 0; allocated < to_alloc; allocated++) {
488 req = __aio_get_req(ctx);
489 if (!req)
490 /* allocation failed, go with what we've got */
491 break;
492 list_add(&req->ki_batch, &batch->head);
493 }
494
495 if (allocated == 0)
496 goto out;
497
498retry:
499 spin_lock_irq(&ctx->ctx_lock);
500 ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
501
502 avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
503 BUG_ON(avail < 0);
504 if (avail == 0 && !called_fput) {
505 /*
506 * Handle a potential starvation case. It is possible that
507 * we hold the last reference on a struct file, causing us
508 * to delay the final fput to non-irq context. In this case,
509 * ctx->reqs_active is artificially high. Calling the fput
510 * routine here may free up a slot in the event completion
511 * ring, allowing this allocation to succeed.
512 */
513 kunmap_atomic(ring);
514 spin_unlock_irq(&ctx->ctx_lock);
515 aio_fput_routine(NULL);
516 called_fput = true;
517 goto retry;
518 }
519
520 if (avail < allocated) {
521 /* Trim back the number of requests. */
522 list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
523 list_del(&req->ki_batch);
524 kmem_cache_free(kiocb_cachep, req);
525 if (--allocated <= avail)
526 break;
527 }
528 }
529
530 batch->count -= allocated;
531 list_for_each_entry(req, &batch->head, ki_batch) {
532 list_add(&req->ki_list, &ctx->active_reqs);
533 ctx->reqs_active++;
534 }
535
536 kunmap_atomic(ring);
537 spin_unlock_irq(&ctx->ctx_lock);
538
539out:
540 return allocated;
541}
542
543static inline struct kiocb *aio_get_req(struct kioctx *ctx,
544 struct kiocb_batch *batch)
545{
546 struct kiocb *req;
547
548 if (list_empty(&batch->head))
549 if (kiocb_batch_refill(ctx, batch) == 0)
550 return NULL;
551 req = list_first_entry(&batch->head, struct kiocb, ki_batch);
552 list_del(&req->ki_batch);
553 return req;
554}
555
556static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
557{
558 assert_spin_locked(&ctx->ctx_lock);
559
560 if (req->ki_eventfd != NULL)
561 eventfd_ctx_put(req->ki_eventfd);
562 if (req->ki_dtor)
563 req->ki_dtor(req);
564 if (req->ki_iovec != &req->ki_inline_vec)
565 kfree(req->ki_iovec);
566 kmem_cache_free(kiocb_cachep, req);
567 ctx->reqs_active--;
568
569 if (unlikely(!ctx->reqs_active && ctx->dead))
570 wake_up_all(&ctx->wait);
571}
572
573static void aio_fput_routine(struct work_struct *data)
574{
575 spin_lock_irq(&fput_lock);
576 while (likely(!list_empty(&fput_head))) {
577 struct kiocb *req = list_kiocb(fput_head.next);
578 struct kioctx *ctx = req->ki_ctx;
579
580 list_del(&req->ki_list);
581 spin_unlock_irq(&fput_lock);
582
583 /* Complete the fput(s) */
584 if (req->ki_filp != NULL)
585 fput(req->ki_filp);
586
587 /* Link the iocb into the context's free list */
588 rcu_read_lock();
589 spin_lock_irq(&ctx->ctx_lock);
590 really_put_req(ctx, req);
591 /*
592 * at that point ctx might've been killed, but actual
593 * freeing is RCU'd
594 */
595 spin_unlock_irq(&ctx->ctx_lock);
596 rcu_read_unlock();
597
598 spin_lock_irq(&fput_lock);
599 }
600 spin_unlock_irq(&fput_lock);
601}
602
603/* __aio_put_req
604 * Returns true if this put was the last user of the request.
605 */
606static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
607{
608 dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
609 req, atomic_long_read(&req->ki_filp->f_count));
610
611 assert_spin_locked(&ctx->ctx_lock);
612
613 req->ki_users--;
614 BUG_ON(req->ki_users < 0);
615 if (likely(req->ki_users))
616 return 0;
617 list_del(&req->ki_list); /* remove from active_reqs */
618 req->ki_cancel = NULL;
619 req->ki_retry = NULL;
620
621 /*
622 * Try to optimize the aio and eventfd file* puts, by avoiding to
623 * schedule work in case it is not final fput() time. In normal cases,
624 * we would not be holding the last reference to the file*, so
625 * this function will be executed w/out any aio kthread wakeup.
626 */
627 if (unlikely(!fput_atomic(req->ki_filp))) {
628 spin_lock(&fput_lock);
629 list_add(&req->ki_list, &fput_head);
630 spin_unlock(&fput_lock);
631 schedule_work(&fput_work);
632 } else {
633 req->ki_filp = NULL;
634 really_put_req(ctx, req);
635 }
636 return 1;
637}
638
639/* aio_put_req
640 * Returns true if this put was the last user of the kiocb,
641 * false if the request is still in use.
642 */
643int aio_put_req(struct kiocb *req)
644{
645 struct kioctx *ctx = req->ki_ctx;
646 int ret;
647 spin_lock_irq(&ctx->ctx_lock);
648 ret = __aio_put_req(ctx, req);
649 spin_unlock_irq(&ctx->ctx_lock);
650 return ret;
651}
652EXPORT_SYMBOL(aio_put_req);
653
654static struct kioctx *lookup_ioctx(unsigned long ctx_id)
655{
656 struct mm_struct *mm = current->mm;
657 struct kioctx *ctx, *ret = NULL;
658 struct hlist_node *n;
659
660 rcu_read_lock();
661
662 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
663 /*
664 * RCU protects us against accessing freed memory but
665 * we have to be careful not to get a reference when the
666 * reference count already dropped to 0 (ctx->dead test
667 * is unreliable because of races).
668 */
669 if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
670 ret = ctx;
671 break;
672 }
673 }
674
675 rcu_read_unlock();
676 return ret;
677}
678
679/*
680 * Queue up a kiocb to be retried. Assumes that the kiocb
681 * has already been marked as kicked, and places it on
682 * the retry run list for the corresponding ioctx, if it
683 * isn't already queued. Returns 1 if it actually queued
684 * the kiocb (to tell the caller to activate the work
685 * queue to process it), or 0, if it found that it was
686 * already queued.
687 */
688static inline int __queue_kicked_iocb(struct kiocb *iocb)
689{
690 struct kioctx *ctx = iocb->ki_ctx;
691
692 assert_spin_locked(&ctx->ctx_lock);
693
694 if (list_empty(&iocb->ki_run_list)) {
695 list_add_tail(&iocb->ki_run_list,
696 &ctx->run_list);
697 return 1;
698 }
699 return 0;
700}
701
702/* aio_run_iocb
703 * This is the core aio execution routine. It is
704 * invoked both for initial i/o submission and
705 * subsequent retries via the aio_kick_handler.
706 * Expects to be invoked with iocb->ki_ctx->lock
707 * already held. The lock is released and reacquired
708 * as needed during processing.
709 *
710 * Calls the iocb retry method (already setup for the
711 * iocb on initial submission) for operation specific
712 * handling, but takes care of most of common retry
713 * execution details for a given iocb. The retry method
714 * needs to be non-blocking as far as possible, to avoid
715 * holding up other iocbs waiting to be serviced by the
716 * retry kernel thread.
717 *
718 * The trickier parts in this code have to do with
719 * ensuring that only one retry instance is in progress
720 * for a given iocb at any time. Providing that guarantee
721 * simplifies the coding of individual aio operations as
722 * it avoids various potential races.
723 */
724static ssize_t aio_run_iocb(struct kiocb *iocb)
725{
726 struct kioctx *ctx = iocb->ki_ctx;
727 ssize_t (*retry)(struct kiocb *);
728 ssize_t ret;
729
730 if (!(retry = iocb->ki_retry)) {
731 printk("aio_run_iocb: iocb->ki_retry = NULL\n");
732 return 0;
733 }
734
735 /*
736 * We don't want the next retry iteration for this
737 * operation to start until this one has returned and
738 * updated the iocb state. However, wait_queue functions
739 * can trigger a kick_iocb from interrupt context in the
740 * meantime, indicating that data is available for the next
741 * iteration. We want to remember that and enable the
742 * next retry iteration _after_ we are through with
743 * this one.
744 *
745 * So, in order to be able to register a "kick", but
746 * prevent it from being queued now, we clear the kick
747 * flag, but make the kick code *think* that the iocb is
748 * still on the run list until we are actually done.
749 * When we are done with this iteration, we check if
750 * the iocb was kicked in the meantime and if so, queue
751 * it up afresh.
752 */
753
754 kiocbClearKicked(iocb);
755
756 /*
757 * This is so that aio_complete knows it doesn't need to
758 * pull the iocb off the run list (We can't just call
759 * INIT_LIST_HEAD because we don't want a kick_iocb to
760 * queue this on the run list yet)
761 */
762 iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
763 spin_unlock_irq(&ctx->ctx_lock);
764
765 /* Quit retrying if the i/o has been cancelled */
766 if (kiocbIsCancelled(iocb)) {
767 ret = -EINTR;
768 aio_complete(iocb, ret, 0);
769 /* must not access the iocb after this */
770 goto out;
771 }
772
773 /*
774 * Now we are all set to call the retry method in async
775 * context.
776 */
777 ret = retry(iocb);
778
779 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
780 /*
781 * There's no easy way to restart the syscall since other AIO's
782 * may be already running. Just fail this IO with EINTR.
783 */
784 if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
785 ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
786 ret = -EINTR;
787 aio_complete(iocb, ret, 0);
788 }
789out:
790 spin_lock_irq(&ctx->ctx_lock);
791
792 if (-EIOCBRETRY == ret) {
793 /*
794 * OK, now that we are done with this iteration
795 * and know that there is more left to go,
796 * this is where we let go so that a subsequent
797 * "kick" can start the next iteration
798 */
799
800 /* will make __queue_kicked_iocb succeed from here on */
801 INIT_LIST_HEAD(&iocb->ki_run_list);
802 /* we must queue the next iteration ourselves, if it
803 * has already been kicked */
804 if (kiocbIsKicked(iocb)) {
805 __queue_kicked_iocb(iocb);
806
807 /*
808 * __queue_kicked_iocb will always return 1 here, because
809 * iocb->ki_run_list is empty at this point so it should
810 * be safe to unconditionally queue the context into the
811 * work queue.
812 */
813 aio_queue_work(ctx);
814 }
815 }
816 return ret;
817}
818
819/*
820 * __aio_run_iocbs:
821 * Process all pending retries queued on the ioctx
822 * run list.
823 * Assumes it is operating within the aio issuer's mm
824 * context.
825 */
826static int __aio_run_iocbs(struct kioctx *ctx)
827{
828 struct kiocb *iocb;
829 struct list_head run_list;
830
831 assert_spin_locked(&ctx->ctx_lock);
832
833 list_replace_init(&ctx->run_list, &run_list);
834 while (!list_empty(&run_list)) {
835 iocb = list_entry(run_list.next, struct kiocb,
836 ki_run_list);
837 list_del(&iocb->ki_run_list);
838 /*
839 * Hold an extra reference while retrying i/o.
840 */
841 iocb->ki_users++; /* grab extra reference */
842 aio_run_iocb(iocb);
843 __aio_put_req(ctx, iocb);
844 }
845 if (!list_empty(&ctx->run_list))
846 return 1;
847 return 0;
848}
849
850static void aio_queue_work(struct kioctx * ctx)
851{
852 unsigned long timeout;
853 /*
854 * if someone is waiting, get the work started right
855 * away, otherwise, use a longer delay
856 */
857 smp_mb();
858 if (waitqueue_active(&ctx->wait))
859 timeout = 1;
860 else
861 timeout = HZ/10;
862 queue_delayed_work(aio_wq, &ctx->wq, timeout);
863}
864
865/*
866 * aio_run_all_iocbs:
867 * Process all pending retries queued on the ioctx
868 * run list, and keep running them until the list
869 * stays empty.
870 * Assumes it is operating within the aio issuer's mm context.
871 */
872static inline void aio_run_all_iocbs(struct kioctx *ctx)
873{
874 spin_lock_irq(&ctx->ctx_lock);
875 while (__aio_run_iocbs(ctx))
876 ;
877 spin_unlock_irq(&ctx->ctx_lock);
878}
879
880/*
881 * aio_kick_handler:
882 * Work queue handler triggered to process pending
883 * retries on an ioctx. Takes on the aio issuer's
884 * mm context before running the iocbs, so that
885 * copy_xxx_user operates on the issuer's address
886 * space.
887 * Run on aiod's context.
888 */
889static void aio_kick_handler(struct work_struct *work)
890{
891 struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
892 mm_segment_t oldfs = get_fs();
893 struct mm_struct *mm;
894 int requeue;
895
896 set_fs(USER_DS);
897 use_mm(ctx->mm);
898 spin_lock_irq(&ctx->ctx_lock);
899 requeue =__aio_run_iocbs(ctx);
900 mm = ctx->mm;
901 spin_unlock_irq(&ctx->ctx_lock);
902 unuse_mm(mm);
903 set_fs(oldfs);
904 /*
905 * we're in a worker thread already; no point using non-zero delay
906 */
907 if (requeue)
908 queue_delayed_work(aio_wq, &ctx->wq, 0);
909}
910
911
912/*
913 * Called by kick_iocb to queue the kiocb for retry
914 * and if required activate the aio work queue to process
915 * it
916 */
917static void try_queue_kicked_iocb(struct kiocb *iocb)
918{
919 struct kioctx *ctx = iocb->ki_ctx;
920 unsigned long flags;
921 int run = 0;
922
923 spin_lock_irqsave(&ctx->ctx_lock, flags);
924 /* set this inside the lock so that we can't race with aio_run_iocb()
925 * testing it and putting the iocb on the run list under the lock */
926 if (!kiocbTryKick(iocb))
927 run = __queue_kicked_iocb(iocb);
928 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
929 if (run)
930 aio_queue_work(ctx);
931}
932
933/*
934 * kick_iocb:
935 * Called typically from a wait queue callback context
936 * to trigger a retry of the iocb.
937 * The retry is usually executed by aio workqueue
938 * threads (See aio_kick_handler).
939 */
940void kick_iocb(struct kiocb *iocb)
941{
942 /* sync iocbs are easy: they can only ever be executing from a
943 * single context. */
944 if (is_sync_kiocb(iocb)) {
945 kiocbSetKicked(iocb);
946 wake_up_process(iocb->ki_obj.tsk);
947 return;
948 }
949
950 try_queue_kicked_iocb(iocb);
951}
952EXPORT_SYMBOL(kick_iocb);
953
954/* aio_complete
955 * Called when the io request on the given iocb is complete.
956 * Returns true if this is the last user of the request. The
957 * only other user of the request can be the cancellation code.
958 */
959int aio_complete(struct kiocb *iocb, long res, long res2)
960{
961 struct kioctx *ctx = iocb->ki_ctx;
962 struct aio_ring_info *info;
963 struct aio_ring *ring;
964 struct io_event *event;
965 unsigned long flags;
966 unsigned long tail;
967 int ret;
968
969 /*
970 * Special case handling for sync iocbs:
971 * - events go directly into the iocb for fast handling
972 * - the sync task with the iocb in its stack holds the single iocb
973 * ref, no other paths have a way to get another ref
974 * - the sync task helpfully left a reference to itself in the iocb
975 */
976 if (is_sync_kiocb(iocb)) {
977 BUG_ON(iocb->ki_users != 1);
978 iocb->ki_user_data = res;
979 iocb->ki_users = 0;
980 wake_up_process(iocb->ki_obj.tsk);
981 return 1;
982 }
983
984 info = &ctx->ring_info;
985
986 /* add a completion event to the ring buffer.
987 * must be done holding ctx->ctx_lock to prevent
988 * other code from messing with the tail
989 * pointer since we might be called from irq
990 * context.
991 */
992 spin_lock_irqsave(&ctx->ctx_lock, flags);
993
994 if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
995 list_del_init(&iocb->ki_run_list);
996
997 /*
998 * cancelled requests don't get events, userland was given one
999 * when the event got cancelled.
1000 */
1001 if (kiocbIsCancelled(iocb))
1002 goto put_rq;
1003
1004 ring = kmap_atomic(info->ring_pages[0]);
1005
1006 tail = info->tail;
1007 event = aio_ring_event(info, tail);
1008 if (++tail >= info->nr)
1009 tail = 0;
1010
1011 event->obj = (u64)(unsigned long)iocb->ki_obj.user;
1012 event->data = iocb->ki_user_data;
1013 event->res = res;
1014 event->res2 = res2;
1015
1016 dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
1017 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
1018 res, res2);
1019
1020 /* after flagging the request as done, we
1021 * must never even look at it again
1022 */
1023 smp_wmb(); /* make event visible before updating tail */
1024
1025 info->tail = tail;
1026 ring->tail = tail;
1027
1028 put_aio_ring_event(event);
1029 kunmap_atomic(ring);
1030
1031 pr_debug("added to ring %p at [%lu]\n", iocb, tail);
1032
1033 /*
1034 * Check if the user asked us to deliver the result through an
1035 * eventfd. The eventfd_signal() function is safe to be called
1036 * from IRQ context.
1037 */
1038 if (iocb->ki_eventfd != NULL)
1039 eventfd_signal(iocb->ki_eventfd, 1);
1040
1041put_rq:
1042 /* everything turned out well, dispose of the aiocb. */
1043 ret = __aio_put_req(ctx, iocb);
1044
1045 /*
1046 * We have to order our ring_info tail store above and test
1047 * of the wait list below outside the wait lock. This is
1048 * like in wake_up_bit() where clearing a bit has to be
1049 * ordered with the unlocked test.
1050 */
1051 smp_mb();
1052
1053 if (waitqueue_active(&ctx->wait))
1054 wake_up(&ctx->wait);
1055
1056 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1057 return ret;
1058}
1059EXPORT_SYMBOL(aio_complete);
1060
1061/* aio_read_evt
1062 * Pull an event off of the ioctx's event ring. Returns the number of
1063 * events fetched (0 or 1 ;-)
1064 * FIXME: make this use cmpxchg.
1065 * TODO: make the ringbuffer user mmap()able (requires FIXME).
1066 */
1067static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
1068{
1069 struct aio_ring_info *info = &ioctx->ring_info;
1070 struct aio_ring *ring;
1071 unsigned long head;
1072 int ret = 0;
1073
1074 ring = kmap_atomic(info->ring_pages[0]);
1075 dprintk("in aio_read_evt h%lu t%lu m%lu\n",
1076 (unsigned long)ring->head, (unsigned long)ring->tail,
1077 (unsigned long)ring->nr);
1078
1079 if (ring->head == ring->tail)
1080 goto out;
1081
1082 spin_lock(&info->ring_lock);
1083
1084 head = ring->head % info->nr;
1085 if (head != ring->tail) {
1086 struct io_event *evp = aio_ring_event(info, head);
1087 *ent = *evp;
1088 head = (head + 1) % info->nr;
1089 smp_mb(); /* finish reading the event before updatng the head */
1090 ring->head = head;
1091 ret = 1;
1092 put_aio_ring_event(evp);
1093 }
1094 spin_unlock(&info->ring_lock);
1095
1096out:
1097 kunmap_atomic(ring);
1098 dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
1099 (unsigned long)ring->head, (unsigned long)ring->tail);
1100 return ret;
1101}
1102
1103struct aio_timeout {
1104 struct timer_list timer;
1105 int timed_out;
1106 struct task_struct *p;
1107};
1108
1109static void timeout_func(unsigned long data)
1110{
1111 struct aio_timeout *to = (struct aio_timeout *)data;
1112
1113 to->timed_out = 1;
1114 wake_up_process(to->p);
1115}
1116
1117static inline void init_timeout(struct aio_timeout *to)
1118{
1119 setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
1120 to->timed_out = 0;
1121 to->p = current;
1122}
1123
1124static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
1125 const struct timespec *ts)
1126{
1127 to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
1128 if (time_after(to->timer.expires, jiffies))
1129 add_timer(&to->timer);
1130 else
1131 to->timed_out = 1;
1132}
1133
1134static inline void clear_timeout(struct aio_timeout *to)
1135{
1136 del_singleshot_timer_sync(&to->timer);
1137}
1138
1139static int read_events(struct kioctx *ctx,
1140 long min_nr, long nr,
1141 struct io_event __user *event,
1142 struct timespec __user *timeout)
1143{
1144 long start_jiffies = jiffies;
1145 struct task_struct *tsk = current;
1146 DECLARE_WAITQUEUE(wait, tsk);
1147 int ret;
1148 int i = 0;
1149 struct io_event ent;
1150 struct aio_timeout to;
1151 int retry = 0;
1152
1153 /* needed to zero any padding within an entry (there shouldn't be
1154 * any, but C is fun!
1155 */
1156 memset(&ent, 0, sizeof(ent));
1157retry:
1158 ret = 0;
1159 while (likely(i < nr)) {
1160 ret = aio_read_evt(ctx, &ent);
1161 if (unlikely(ret <= 0))
1162 break;
1163
1164 dprintk("read event: %Lx %Lx %Lx %Lx\n",
1165 ent.data, ent.obj, ent.res, ent.res2);
1166
1167 /* Could we split the check in two? */
1168 ret = -EFAULT;
1169 if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
1170 dprintk("aio: lost an event due to EFAULT.\n");
1171 break;
1172 }
1173 ret = 0;
1174
1175 /* Good, event copied to userland, update counts. */
1176 event ++;
1177 i ++;
1178 }
1179
1180 if (min_nr <= i)
1181 return i;
1182 if (ret)
1183 return ret;
1184
1185 /* End fast path */
1186
1187 /* racey check, but it gets redone */
1188 if (!retry && unlikely(!list_empty(&ctx->run_list))) {
1189 retry = 1;
1190 aio_run_all_iocbs(ctx);
1191 goto retry;
1192 }
1193
1194 init_timeout(&to);
1195 if (timeout) {
1196 struct timespec ts;
1197 ret = -EFAULT;
1198 if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
1199 goto out;
1200
1201 set_timeout(start_jiffies, &to, &ts);
1202 }
1203
1204 while (likely(i < nr)) {
1205 add_wait_queue_exclusive(&ctx->wait, &wait);
1206 do {
1207 set_task_state(tsk, TASK_INTERRUPTIBLE);
1208 ret = aio_read_evt(ctx, &ent);
1209 if (ret)
1210 break;
1211 if (min_nr <= i)
1212 break;
1213 if (unlikely(ctx->dead)) {
1214 ret = -EINVAL;
1215 break;
1216 }
1217 if (to.timed_out) /* Only check after read evt */
1218 break;
1219 /* Try to only show up in io wait if there are ops
1220 * in flight */
1221 if (ctx->reqs_active)
1222 io_schedule();
1223 else
1224 schedule();
1225 if (signal_pending(tsk)) {
1226 ret = -EINTR;
1227 break;
1228 }
1229 /*ret = aio_read_evt(ctx, &ent);*/
1230 } while (1) ;
1231
1232 set_task_state(tsk, TASK_RUNNING);
1233 remove_wait_queue(&ctx->wait, &wait);
1234
1235 if (unlikely(ret <= 0))
1236 break;
1237
1238 ret = -EFAULT;
1239 if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
1240 dprintk("aio: lost an event due to EFAULT.\n");
1241 break;
1242 }
1243
1244 /* Good, event copied to userland, update counts. */
1245 event ++;
1246 i ++;
1247 }
1248
1249 if (timeout)
1250 clear_timeout(&to);
1251out:
1252 destroy_timer_on_stack(&to.timer);
1253 return i ? i : ret;
1254}
1255
1256/* Take an ioctx and remove it from the list of ioctx's. Protects
1257 * against races with itself via ->dead.
1258 */
1259static void io_destroy(struct kioctx *ioctx)
1260{
1261 struct mm_struct *mm = current->mm;
1262 int was_dead;
1263
1264 /* delete the entry from the list is someone else hasn't already */
1265 spin_lock(&mm->ioctx_lock);
1266 was_dead = ioctx->dead;
1267 ioctx->dead = 1;
1268 hlist_del_rcu(&ioctx->list);
1269 spin_unlock(&mm->ioctx_lock);
1270
1271 dprintk("aio_release(%p)\n", ioctx);
1272 if (likely(!was_dead))
1273 put_ioctx(ioctx); /* twice for the list */
1274
1275 kill_ctx(ioctx);
1276
1277 /*
1278 * Wake up any waiters. The setting of ctx->dead must be seen
1279 * by other CPUs at this point. Right now, we rely on the
1280 * locking done by the above calls to ensure this consistency.
1281 */
1282 wake_up_all(&ioctx->wait);
1283}
1284
1285/* sys_io_setup:
1286 * Create an aio_context capable of receiving at least nr_events.
1287 * ctxp must not point to an aio_context that already exists, and
1288 * must be initialized to 0 prior to the call. On successful
1289 * creation of the aio_context, *ctxp is filled in with the resulting
1290 * handle. May fail with -EINVAL if *ctxp is not initialized,
1291 * if the specified nr_events exceeds internal limits. May fail
1292 * with -EAGAIN if the specified nr_events exceeds the user's limit
1293 * of available events. May fail with -ENOMEM if insufficient kernel
1294 * resources are available. May fail with -EFAULT if an invalid
1295 * pointer is passed for ctxp. Will fail with -ENOSYS if not
1296 * implemented.
1297 */
1298SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1299{
1300 struct kioctx *ioctx = NULL;
1301 unsigned long ctx;
1302 long ret;
1303
1304 ret = get_user(ctx, ctxp);
1305 if (unlikely(ret))
1306 goto out;
1307
1308 ret = -EINVAL;
1309 if (unlikely(ctx || nr_events == 0)) {
1310 pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
1311 ctx, nr_events);
1312 goto out;
1313 }
1314
1315 ioctx = ioctx_alloc(nr_events);
1316 ret = PTR_ERR(ioctx);
1317 if (!IS_ERR(ioctx)) {
1318 ret = put_user(ioctx->user_id, ctxp);
1319 if (ret)
1320 io_destroy(ioctx);
1321 put_ioctx(ioctx);
1322 }
1323
1324out:
1325 return ret;
1326}
1327
1328/* sys_io_destroy:
1329 * Destroy the aio_context specified. May cancel any outstanding
1330 * AIOs and block on completion. Will fail with -ENOSYS if not
1331 * implemented. May fail with -EINVAL if the context pointed to
1332 * is invalid.
1333 */
1334SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1335{
1336 struct kioctx *ioctx = lookup_ioctx(ctx);
1337 if (likely(NULL != ioctx)) {
1338 io_destroy(ioctx);
1339 put_ioctx(ioctx);
1340 return 0;
1341 }
1342 pr_debug("EINVAL: io_destroy: invalid context id\n");
1343 return -EINVAL;
1344}
1345
1346static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
1347{
1348 struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
1349
1350 BUG_ON(ret <= 0);
1351
1352 while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
1353 ssize_t this = min((ssize_t)iov->iov_len, ret);
1354 iov->iov_base += this;
1355 iov->iov_len -= this;
1356 iocb->ki_left -= this;
1357 ret -= this;
1358 if (iov->iov_len == 0) {
1359 iocb->ki_cur_seg++;
1360 iov++;
1361 }
1362 }
1363
1364 /* the caller should not have done more io than what fit in
1365 * the remaining iovecs */
1366 BUG_ON(ret > 0 && iocb->ki_left == 0);
1367}
1368
1369static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
1370{
1371 struct file *file = iocb->ki_filp;
1372 struct address_space *mapping = file->f_mapping;
1373 struct inode *inode = mapping->host;
1374 ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
1375 unsigned long, loff_t);
1376 ssize_t ret = 0;
1377 unsigned short opcode;
1378
1379 if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
1380 (iocb->ki_opcode == IOCB_CMD_PREAD)) {
1381 rw_op = file->f_op->aio_read;
1382 opcode = IOCB_CMD_PREADV;
1383 } else {
1384 rw_op = file->f_op->aio_write;
1385 opcode = IOCB_CMD_PWRITEV;
1386 }
1387
1388 /* This matches the pread()/pwrite() logic */
1389 if (iocb->ki_pos < 0)
1390 return -EINVAL;
1391
1392 do {
1393 ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
1394 iocb->ki_nr_segs - iocb->ki_cur_seg,
1395 iocb->ki_pos);
1396 if (ret > 0)
1397 aio_advance_iovec(iocb, ret);
1398
1399 /* retry all partial writes. retry partial reads as long as its a
1400 * regular file. */
1401 } while (ret > 0 && iocb->ki_left > 0 &&
1402 (opcode == IOCB_CMD_PWRITEV ||
1403 (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
1404
1405 /* This means we must have transferred all that we could */
1406 /* No need to retry anymore */
1407 if ((ret == 0) || (iocb->ki_left == 0))
1408 ret = iocb->ki_nbytes - iocb->ki_left;
1409
1410 /* If we managed to write some out we return that, rather than
1411 * the eventual error. */
1412 if (opcode == IOCB_CMD_PWRITEV
1413 && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
1414 && iocb->ki_nbytes - iocb->ki_left)
1415 ret = iocb->ki_nbytes - iocb->ki_left;
1416
1417 return ret;
1418}
1419
1420static ssize_t aio_fdsync(struct kiocb *iocb)
1421{
1422 struct file *file = iocb->ki_filp;
1423 ssize_t ret = -EINVAL;
1424
1425 if (file->f_op->aio_fsync)
1426 ret = file->f_op->aio_fsync(iocb, 1);
1427 return ret;
1428}
1429
1430static ssize_t aio_fsync(struct kiocb *iocb)
1431{
1432 struct file *file = iocb->ki_filp;
1433 ssize_t ret = -EINVAL;
1434
1435 if (file->f_op->aio_fsync)
1436 ret = file->f_op->aio_fsync(iocb, 0);
1437 return ret;
1438}
1439
1440static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
1441{
1442 ssize_t ret;
1443
1444#ifdef CONFIG_COMPAT
1445 if (compat)
1446 ret = compat_rw_copy_check_uvector(type,
1447 (struct compat_iovec __user *)kiocb->ki_buf,
1448 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1449 &kiocb->ki_iovec);
1450 else
1451#endif
1452 ret = rw_copy_check_uvector(type,
1453 (struct iovec __user *)kiocb->ki_buf,
1454 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1455 &kiocb->ki_iovec);
1456 if (ret < 0)
1457 goto out;
1458
1459 ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
1460 if (ret < 0)
1461 goto out;
1462
1463 kiocb->ki_nr_segs = kiocb->ki_nbytes;
1464 kiocb->ki_cur_seg = 0;
1465 /* ki_nbytes/left now reflect bytes instead of segs */
1466 kiocb->ki_nbytes = ret;
1467 kiocb->ki_left = ret;
1468
1469 ret = 0;
1470out:
1471 return ret;
1472}
1473
1474static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
1475{
1476 int bytes;
1477
1478 bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
1479 if (bytes < 0)
1480 return bytes;
1481
1482 kiocb->ki_iovec = &kiocb->ki_inline_vec;
1483 kiocb->ki_iovec->iov_base = kiocb->ki_buf;
1484 kiocb->ki_iovec->iov_len = bytes;
1485 kiocb->ki_nr_segs = 1;
1486 kiocb->ki_cur_seg = 0;
1487 return 0;
1488}
1489
1490/*
1491 * aio_setup_iocb:
1492 * Performs the initial checks and aio retry method
1493 * setup for the kiocb at the time of io submission.
1494 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
					kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(READ, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
					kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(WRITE, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		pr_debug("EINVAL: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx, batch);	/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, pin the
		 * eventfd context now.  The file descriptor must refer to
		 * an eventfd() fd, which will be signaled for each completed
		 * event using eventfd_signal().  (See the illustrative
		 * sketch after this function.)
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);
	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	/*
	 * We could have raced with io_destroy() and are currently holding a
	 * reference to ctx which should be destroyed. We cannot submit IO
	 * since ctx gets freed as soon as io_submit() puts its reference. The
	 * check here is reliable: io_destroy() sets ctx->dead before waiting
	 * for outstanding IO and the barrier between these two is realized by
	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
	 * increment ctx->reqs_active before checking for ctx->dead and the
	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
	 * don't see ctx->dead set here, io_destroy() waits for our IO to
	 * finish.
	 */
	if (ctx->dead) {
		spin_unlock_irq(&ctx->ctx_lock);
		ret = -EINVAL;
		goto out_put_req;
	}
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}
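
/*
 * Illustrative userspace sketch (not part of the kernel build) of the
 * IOCB_FLAG_RESFD path handled in io_submit_one() above: each completion
 * also signals an eventfd, so aio completions can be multiplexed with
 * poll/epoll:
 *
 *	int efd = eventfd(0, 0);
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	__u64 count;
 *
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_flags = IOCB_FLAG_RESFD;
 *	cb.aio_resfd = efd;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 *
 *	read(efd, &count, sizeof(count));	// count = completed events
 */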

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX / sizeof(*iocbpp)))
		nr = LONG_MAX / sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr * sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?  (As written it does: on the first failure
	 * the loop below stops, and we return the number of iocbs already
	 * queued, or the error if none were.)
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.  (An illustrative userspace
 *	sketch follows the function body below.)
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, false);
}
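
/*
 * Illustrative userspace sketch (not part of the kernel build); glibc
 * provides no wrapper for the aio syscalls, so raw syscall(2) (or
 * libaio) is used:
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	long ret;
 *
 *	syscall(SYS_io_setup, 128, &ctx);	// ring with room for 128 events
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PWRITE;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	ret = syscall(SYS_io_submit, ctx, 1, cbs);
 *
 * A return of 1 means the iocb was queued; with more than one iocb a
 * short positive count means only the first 'ret' requests were queued,
 * matching the 'return i ? i : ret' logic in do_io_submit().
 */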

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);

		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if the aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (cancel) {
		struct io_event tmp;

		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/*
			 * Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}
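
/*
 * Illustrative userspace sketch (not part of the kernel build):
 * cancellation names the request by the same iocb pointer that was
 * passed to io_submit(), which is how lookup_kiocb() above finds it:
 *
 *	struct io_event ev;
 *	long ret = syscall(SYS_io_cancel, ctx, &cb, &ev);
 *
 * On success (ret == 0) the synthesized completion is in 'ev' and will
 * not also appear in the ring; -EAGAIN means the request could not be
 * cancelled and will complete normally via io_getevents().
 */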

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, or if min_nr, nr, or the timeout are
 *	out of range.  May fail with -EFAULT if any of the memory specified
 *	is invalid.  May return 0 or < min_nr if the timeout specified by
 *	timeout has elapsed before sufficient events are available, where
 *	timeout == NULL specifies an infinite timeout.  Note that the
 *	timeout pointed to by timeout is relative and will be updated if
 *	not NULL and the operation blocks.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
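
/*
 * Illustrative userspace sketch (not part of the kernel build): reap
 * between 1 and 32 completions, blocking for at most one second; the
 * relative timeout may be updated if the call blocks:
 *
 *	struct io_event events[32];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	long n;
 *
 *	n = syscall(SYS_io_getevents, ctx, 1, 32, events, &ts);
 *
 * On success n is the number of events reaped; each events[i].res holds
 * that operation's result (bytes transferred or a negative errno).
 */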