// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

#define IO_RSRC_REF_BATCH	100

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

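/*
 * References to the current rsrc node are cached in batches (see
 * io_rsrc_refs_refill()) so the percpu refcount isn't hit for every
 * request. Drop whatever is still cached, e.g. before switching nodes.
 */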
void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	if (ctx->rsrc_cached_refs) {
		io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
		ctx->rsrc_cached_refs = 0;
	}
}

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

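/*
 * Copy a single iovec from userspace, using the compat layout if the
 * ring was set up by a 32-bit task.
 */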
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
	percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
}

static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);

		if (prsrc->tag) {
			if (ctx->flags & IORING_SETUP_IOPOLL) {
				mutex_lock(&ctx->uring_lock);
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
				mutex_unlock(&ctx->uring_lock);
			} else {
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
			}
		}

		rsrc_data->do_put(ctx, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(ref_node);
	if (atomic_dec_and_test(&rsrc_data->refs))
		complete(&rsrc_data->done);
}

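/*
 * Deferred work that runs once rsrc nodes' refs hit zero: put every
 * resource (file or buffer) queued on each node, posting the registered
 * tag as a CQE where one was supplied.
 */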
void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct io_rsrc_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct io_rsrc_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}

void io_rsrc_put_tw(struct callback_head *cb)
{
	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
					       rsrc_put_tw);

	io_rsrc_put_work(&ctx->rsrc_put_work.work);
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !atomic_dec_and_test(&data->refs))
		wait_for_completion(&data->done);
}

void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}

static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
	struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
	unsigned long flags;
	bool first_add = false;
	unsigned long delay = HZ;

	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
	node->done = true;

	/* if we are mid-quiesce then do not delay */
	if (node->rsrc_data->quiesce)
		delay = 0;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;
		list_del(&node->node);
		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
	}
	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);

	if (!first_add)
		return;

	if (ctx->submitter_task) {
		if (!task_work_add(ctx->submitter_task, &ctx->rsrc_put_tw,
				   ctx->notify_method))
			return;
	}
	mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}

static struct io_rsrc_node *io_rsrc_node_alloc(void)
{
	struct io_rsrc_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return NULL;
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}

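/*
 * Swap in a fresh rsrc node for the ctx. If @data_to_kill is given, the
 * current node is attached to it and its percpu ref is killed, so the
 * resources queued on it get put once all inflight users are done. The
 * backup node allocated by io_rsrc_node_switch_start() becomes the new
 * active node, hence the switch itself cannot fail.
 */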
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	WARN_ON_ONCE(!ctx->rsrc_backup_node);
	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);

	io_rsrc_refs_drop(ctx);

	if (data_to_kill) {
		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;

		rsrc_node->rsrc_data = data_to_kill;
		spin_lock_irq(&ctx->rsrc_ref_lock);
		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
		spin_unlock_irq(&ctx->rsrc_ref_lock);

		atomic_inc(&data_to_kill->refs);
		percpu_ref_kill(&rsrc_node->refs);
		ctx->rsrc_node = NULL;
	}

	if (!ctx->rsrc_node) {
		ctx->rsrc_node = ctx->rsrc_backup_node;
		ctx->rsrc_backup_node = NULL;
	}
}

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc();
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}

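/*
 * Wait for all references to @data to go away so the table can be torn
 * down safely. May drop ->uring_lock while waiting, and handles both
 * signal interruption and the data being revived by another task.
 */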
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, another task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	/* kill initial ref, already quiesced if zero */
	if (atomic_dec_and_test(&data->refs))
		return 0;

	data->quiesce = true;
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			atomic_inc(&data->refs);
			/* wait for any outstanding work that may complete data->done */
			flush_delayed_work(&ctx->rsrc_put_work);
			reinit_completion(&data->done);
			mutex_lock(&ctx->uring_lock);
			break;
		}

		flush_delayed_work(&ctx->rsrc_put_work);
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (atomic_read(&data->refs) <= 0)
				break;
			/*
			 * it has been revived by another thread while
			 * we were unlocked
			 */
			mutex_unlock(&ctx->uring_lock);
		}
	} while (1);
	data->quiesce = false;

	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

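/*
 * Allocate a two-level table out of page-sized chunks, so a large tag
 * table doesn't need a single high-order allocation.
 */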
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = -ENOMEM;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}

	atomic_set(&data->refs, 1);
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

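/*
 * Apply updates to the registered file table. Each entry either removes
 * the current file (fd == -1), leaves the slot alone
 * (IORING_REGISTER_FILES_SKIP), or installs a newly fetched file.
 * Replaced files are queued on the current rsrc node and put only once
 * all inflight requests that may use them have finished.
 */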
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, offset) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

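/*
 * Queue a resource for deferred put on the given rsrc node. The actual
 * put (and posting of the tag CQE) happens when the node's refs drop to
 * zero, i.e. when no inflight request can still be using the resource.
 */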
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &node->rsrc_list);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}

static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	if (!io_file_need_scm(file)) {
		fput(file);
		return;
	}

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
					left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}

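/*
 * Register an array of files (or a sparse set) as the ring's fixed file
 * table. Files that can themselves hold files are accounted to the UNIX
 * gc via SCM_RIGHTS, so reference cycles through the ring can still be
 * reclaimed.
 */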
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	io_rsrc_node_switch(ctx, NULL);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only happens at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

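/*
 * Pin the user pages backing [ubuf, ubuf + len) for long-term use.
 * Only anonymous, shmem and hugetlb backed memory is accepted; other
 * file-backed vmas are rejected with -EOPNOTSUPP, as their pages aren't
 * suitable for a long-term pin.
 */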
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			struct vm_area_struct *vma = vmas[i];

			if (vma_is_shmem(vma))
				continue;
			if (vma->vm_file &&
			    !is_file_hugepages(vma->vm_file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

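/*
 * Pin and map a single user buffer into an io_mapped_ubuf, accounting
 * the pinned memory against the ring's user/mm. Compound (huge) pages
 * are only accounted once across all registered buffers.
 */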
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		imu->bvec[i].bv_page = pages[i];
		imu->bvec[i].bv_len = vec_len;
		imu->bvec[i].bv_offset = off;
		off = 0;
		size -= vec_len;
	}
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

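/*
 * Register an array of user buffers (or a sparse set if @arg is NULL),
 * pinning each one and building its bvec table up front so the I/O
 * fast path can use the buffers without any per-request pinning.
 */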
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	else
		io_rsrc_node_switch(ctx, NULL);
	return ret;
}

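/*
 * Set up @iter over a previously registered buffer for the given range.
 * The range must fall entirely within the registered region. The
 * iterator is adjusted by hand rather than via iov_iter_advance(), as
 * all bvecs but the first and last are known to be page sized.
 */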
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/errno.h>
4#include <linux/fs.h>
5#include <linux/file.h>
6#include <linux/mm.h>
7#include <linux/slab.h>
8#include <linux/nospec.h>
9#include <linux/hugetlb.h>
10#include <linux/compat.h>
11#include <linux/io_uring.h>
12
13#include <uapi/linux/io_uring.h>
14
15#include "io_uring.h"
16#include "openclose.h"
17#include "rsrc.h"
18
19struct io_rsrc_update {
20 struct file *file;
21 u64 arg;
22 u32 nr_args;
23 u32 offset;
24};
25
26static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
27static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
28 struct io_mapped_ubuf **pimu,
29 struct page **last_hpage);
30
31/* only define max */
32#define IORING_MAX_FIXED_FILES (1U << 20)
33#define IORING_MAX_REG_BUFFERS (1U << 14)
34
35static const struct io_mapped_ubuf dummy_ubuf = {
36 /* set invalid range, so io_import_fixed() fails meeting it */
37 .ubuf = -1UL,
38 .ubuf_end = 0,
39};
40
41int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
42{
43 unsigned long page_limit, cur_pages, new_pages;
44
45 if (!nr_pages)
46 return 0;
47
48 /* Don't allow more pages than we can safely lock */
49 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
50
51 cur_pages = atomic_long_read(&user->locked_vm);
52 do {
53 new_pages = cur_pages + nr_pages;
54 if (new_pages > page_limit)
55 return -ENOMEM;
56 } while (!atomic_long_try_cmpxchg(&user->locked_vm,
57 &cur_pages, new_pages));
58 return 0;
59}
60
61static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
62{
63 if (ctx->user)
64 __io_unaccount_mem(ctx->user, nr_pages);
65
66 if (ctx->mm_account)
67 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
68}
69
70static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
71{
72 int ret;
73
74 if (ctx->user) {
75 ret = __io_account_mem(ctx->user, nr_pages);
76 if (ret)
77 return ret;
78 }
79
80 if (ctx->mm_account)
81 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
82
83 return 0;
84}
85
86static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
87 void __user *arg, unsigned index)
88{
89 struct iovec __user *src;
90
91#ifdef CONFIG_COMPAT
92 if (ctx->compat) {
93 struct compat_iovec __user *ciovs;
94 struct compat_iovec ciov;
95
96 ciovs = (struct compat_iovec __user *) arg;
97 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
98 return -EFAULT;
99
100 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
101 dst->iov_len = ciov.iov_len;
102 return 0;
103 }
104#endif
105 src = (struct iovec __user *) arg;
106 if (copy_from_user(dst, &src[index], sizeof(*dst)))
107 return -EFAULT;
108 return 0;
109}
110
111static int io_buffer_validate(struct iovec *iov)
112{
113 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
114
115 /*
116 * Don't impose further limits on the size and buffer
117 * constraints here, we'll -EINVAL later when IO is
118 * submitted if they are wrong.
119 */
120 if (!iov->iov_base)
121 return iov->iov_len ? -EFAULT : 0;
122 if (!iov->iov_len)
123 return -EFAULT;
124
125 /* arbitrary limit, but we need something */
126 if (iov->iov_len > SZ_1G)
127 return -EFAULT;
128
129 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
130 return -EOVERFLOW;
131
132 return 0;
133}
134
135static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
136{
137 struct io_mapped_ubuf *imu = *slot;
138 unsigned int i;
139
140 if (imu != &dummy_ubuf) {
141 for (i = 0; i < imu->nr_bvecs; i++)
142 unpin_user_page(imu->bvec[i].bv_page);
143 if (imu->acct_pages)
144 io_unaccount_mem(ctx, imu->acct_pages);
145 kvfree(imu);
146 }
147 *slot = NULL;
148}
149
150static void io_rsrc_put_work(struct io_rsrc_node *node)
151{
152 struct io_rsrc_put *prsrc = &node->item;
153
154 if (prsrc->tag)
155 io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);
156
157 switch (node->type) {
158 case IORING_RSRC_FILE:
159 fput(prsrc->file);
160 break;
161 case IORING_RSRC_BUFFER:
162 io_rsrc_buf_put(node->ctx, prsrc);
163 break;
164 default:
165 WARN_ON_ONCE(1);
166 break;
167 }
168}
169
170void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
171{
172 if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
173 kfree(node);
174}
175
176void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
177 __must_hold(&node->ctx->uring_lock)
178{
179 struct io_ring_ctx *ctx = node->ctx;
180
181 while (!list_empty(&ctx->rsrc_ref_list)) {
182 node = list_first_entry(&ctx->rsrc_ref_list,
183 struct io_rsrc_node, node);
184 /* recycle ref nodes in order */
185 if (node->refs)
186 break;
187 list_del(&node->node);
188
189 if (likely(!node->empty))
190 io_rsrc_put_work(node);
191 io_rsrc_node_destroy(ctx, node);
192 }
193 if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
194 wake_up_all(&ctx->rsrc_quiesce_wq);
195}
196
197struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
198{
199 struct io_rsrc_node *ref_node;
200 struct io_cache_entry *entry;
201
202 entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
203 if (entry) {
204 ref_node = container_of(entry, struct io_rsrc_node, cache);
205 } else {
206 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
207 if (!ref_node)
208 return NULL;
209 }
210
211 ref_node->ctx = ctx;
212 ref_node->empty = 0;
213 ref_node->refs = 1;
214 return ref_node;
215}
216
217__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
218 struct io_ring_ctx *ctx)
219{
220 struct io_rsrc_node *backup;
221 DEFINE_WAIT(we);
222 int ret;
223
224 /* As We may drop ->uring_lock, other task may have started quiesce */
225 if (data->quiesce)
226 return -ENXIO;
227
228 backup = io_rsrc_node_alloc(ctx);
229 if (!backup)
230 return -ENOMEM;
231 ctx->rsrc_node->empty = true;
232 ctx->rsrc_node->type = -1;
233 list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
234 io_put_rsrc_node(ctx, ctx->rsrc_node);
235 ctx->rsrc_node = backup;
236
237 if (list_empty(&ctx->rsrc_ref_list))
238 return 0;
239
240 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
241 atomic_set(&ctx->cq_wait_nr, 1);
242 smp_mb();
243 }
244
245 ctx->rsrc_quiesce++;
246 data->quiesce = true;
247 do {
248 prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
249 mutex_unlock(&ctx->uring_lock);
250
251 ret = io_run_task_work_sig(ctx);
252 if (ret < 0) {
253 mutex_lock(&ctx->uring_lock);
254 if (list_empty(&ctx->rsrc_ref_list))
255 ret = 0;
256 break;
257 }
258
259 schedule();
260 __set_current_state(TASK_RUNNING);
261 mutex_lock(&ctx->uring_lock);
262 ret = 0;
263 } while (!list_empty(&ctx->rsrc_ref_list));
264
265 finish_wait(&ctx->rsrc_quiesce_wq, &we);
266 data->quiesce = false;
267 ctx->rsrc_quiesce--;
268
269 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
270 atomic_set(&ctx->cq_wait_nr, 0);
271 smp_mb();
272 }
273 return ret;
274}
275
276static void io_free_page_table(void **table, size_t size)
277{
278 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
279
280 for (i = 0; i < nr_tables; i++)
281 kfree(table[i]);
282 kfree(table);
283}
284
285static void io_rsrc_data_free(struct io_rsrc_data *data)
286{
287 size_t size = data->nr * sizeof(data->tags[0][0]);
288
289 if (data->tags)
290 io_free_page_table((void **)data->tags, size);
291 kfree(data);
292}
293
294static __cold void **io_alloc_page_table(size_t size)
295{
296 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
297 size_t init_size = size;
298 void **table;
299
300 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
301 if (!table)
302 return NULL;
303
304 for (i = 0; i < nr_tables; i++) {
305 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
306
307 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
308 if (!table[i]) {
309 io_free_page_table(table, init_size);
310 return NULL;
311 }
312 size -= this_size;
313 }
314 return table;
315}
316
317__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
318 u64 __user *utags,
319 unsigned nr, struct io_rsrc_data **pdata)
320{
321 struct io_rsrc_data *data;
322 int ret = 0;
323 unsigned i;
324
325 data = kzalloc(sizeof(*data), GFP_KERNEL);
326 if (!data)
327 return -ENOMEM;
328 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
329 if (!data->tags) {
330 kfree(data);
331 return -ENOMEM;
332 }
333
334 data->nr = nr;
335 data->ctx = ctx;
336 data->rsrc_type = type;
337 if (utags) {
338 ret = -EFAULT;
339 for (i = 0; i < nr; i++) {
340 u64 *tag_slot = io_get_tag_slot(data, i);
341
342 if (copy_from_user(tag_slot, &utags[i],
343 sizeof(*tag_slot)))
344 goto fail;
345 }
346 }
347 *pdata = data;
348 return 0;
349fail:
350 io_rsrc_data_free(data);
351 return ret;
352}
353
354static int __io_sqe_files_update(struct io_ring_ctx *ctx,
355 struct io_uring_rsrc_update2 *up,
356 unsigned nr_args)
357{
358 u64 __user *tags = u64_to_user_ptr(up->tags);
359 __s32 __user *fds = u64_to_user_ptr(up->data);
360 struct io_rsrc_data *data = ctx->file_data;
361 struct io_fixed_file *file_slot;
362 int fd, i, err = 0;
363 unsigned int done;
364
365 if (!ctx->file_data)
366 return -ENXIO;
367 if (up->offset + nr_args > ctx->nr_user_files)
368 return -EINVAL;
369
370 for (done = 0; done < nr_args; done++) {
371 u64 tag = 0;
372
373 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
374 copy_from_user(&fd, &fds[done], sizeof(fd))) {
375 err = -EFAULT;
376 break;
377 }
378 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
379 err = -EINVAL;
380 break;
381 }
382 if (fd == IORING_REGISTER_FILES_SKIP)
383 continue;
384
385 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
386 file_slot = io_fixed_file_slot(&ctx->file_table, i);
387
388 if (file_slot->file_ptr) {
389 err = io_queue_rsrc_removal(data, i,
390 io_slot_file(file_slot));
391 if (err)
392 break;
393 file_slot->file_ptr = 0;
394 io_file_bitmap_clear(&ctx->file_table, i);
395 }
396 if (fd != -1) {
397 struct file *file = fget(fd);
398
399 if (!file) {
400 err = -EBADF;
401 break;
402 }
403 /*
404 * Don't allow io_uring instances to be registered.
405 */
406 if (io_is_uring_fops(file)) {
407 fput(file);
408 err = -EBADF;
409 break;
410 }
411 *io_get_tag_slot(data, i) = tag;
412 io_fixed_file_set(file_slot, file);
413 io_file_bitmap_set(&ctx->file_table, i);
414 }
415 }
416 return done ? done : err;
417}
418
419static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
420 struct io_uring_rsrc_update2 *up,
421 unsigned int nr_args)
422{
423 u64 __user *tags = u64_to_user_ptr(up->tags);
424 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
425 struct page *last_hpage = NULL;
426 __u32 done;
427 int i, err;
428
429 if (!ctx->buf_data)
430 return -ENXIO;
431 if (up->offset + nr_args > ctx->nr_user_bufs)
432 return -EINVAL;
433
434 for (done = 0; done < nr_args; done++) {
435 struct io_mapped_ubuf *imu;
436 u64 tag = 0;
437
438 err = io_copy_iov(ctx, &iov, iovs, done);
439 if (err)
440 break;
441 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
442 err = -EFAULT;
443 break;
444 }
445 err = io_buffer_validate(&iov);
446 if (err)
447 break;
448 if (!iov.iov_base && tag) {
449 err = -EINVAL;
450 break;
451 }
452 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
453 if (err)
454 break;
455
456 i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
457 if (ctx->user_bufs[i] != &dummy_ubuf) {
458 err = io_queue_rsrc_removal(ctx->buf_data, i,
459 ctx->user_bufs[i]);
460 if (unlikely(err)) {
461 io_buffer_unmap(ctx, &imu);
462 break;
463 }
464 ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
465 }
466
467 ctx->user_bufs[i] = imu;
468 *io_get_tag_slot(ctx->buf_data, i) = tag;
469 }
470 return done ? done : err;
471}
472
473static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
474 struct io_uring_rsrc_update2 *up,
475 unsigned nr_args)
476{
477 __u32 tmp;
478
479 lockdep_assert_held(&ctx->uring_lock);
480
481 if (check_add_overflow(up->offset, nr_args, &tmp))
482 return -EOVERFLOW;
483
484 switch (type) {
485 case IORING_RSRC_FILE:
486 return __io_sqe_files_update(ctx, up, nr_args);
487 case IORING_RSRC_BUFFER:
488 return __io_sqe_buffers_update(ctx, up, nr_args);
489 }
490 return -EINVAL;
491}
492
493int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
494 unsigned nr_args)
495{
496 struct io_uring_rsrc_update2 up;
497
498 if (!nr_args)
499 return -EINVAL;
500 memset(&up, 0, sizeof(up));
501 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
502 return -EFAULT;
503 if (up.resv || up.resv2)
504 return -EINVAL;
505 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
506}
507
508int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
509 unsigned size, unsigned type)
510{
511 struct io_uring_rsrc_update2 up;
512
513 if (size != sizeof(up))
514 return -EINVAL;
515 if (copy_from_user(&up, arg, sizeof(up)))
516 return -EFAULT;
517 if (!up.nr || up.resv || up.resv2)
518 return -EINVAL;
519 return __io_register_rsrc_update(ctx, type, &up, up.nr);
520}
521
522__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
523 unsigned int size, unsigned int type)
524{
525 struct io_uring_rsrc_register rr;
526
527 /* keep it extendible */
528 if (size != sizeof(rr))
529 return -EINVAL;
530
531 memset(&rr, 0, sizeof(rr));
532 if (copy_from_user(&rr, arg, size))
533 return -EFAULT;
534 if (!rr.nr || rr.resv2)
535 return -EINVAL;
536 if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
537 return -EINVAL;
538
539 switch (type) {
540 case IORING_RSRC_FILE:
541 if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
542 break;
543 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
544 rr.nr, u64_to_user_ptr(rr.tags));
545 case IORING_RSRC_BUFFER:
546 if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
547 break;
548 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
549 rr.nr, u64_to_user_ptr(rr.tags));
550 }
551 return -EINVAL;
552}
553
554int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
555{
556 struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
557
558 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
559 return -EINVAL;
560 if (sqe->rw_flags || sqe->splice_fd_in)
561 return -EINVAL;
562
563 up->offset = READ_ONCE(sqe->off);
564 up->nr_args = READ_ONCE(sqe->len);
565 if (!up->nr_args)
566 return -EINVAL;
567 up->arg = READ_ONCE(sqe->addr);
568 return 0;
569}
570
571static int io_files_update_with_index_alloc(struct io_kiocb *req,
572 unsigned int issue_flags)
573{
574 struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
575 __s32 __user *fds = u64_to_user_ptr(up->arg);
576 unsigned int done;
577 struct file *file;
578 int ret, fd;
579
580 if (!req->ctx->file_data)
581 return -ENXIO;
582
583 for (done = 0; done < up->nr_args; done++) {
584 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
585 ret = -EFAULT;
586 break;
587 }
588
589 file = fget(fd);
590 if (!file) {
591 ret = -EBADF;
592 break;
593 }
594 ret = io_fixed_fd_install(req, issue_flags, file,
595 IORING_FILE_INDEX_ALLOC);
596 if (ret < 0)
597 break;
598 if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
599 __io_close_fixed(req->ctx, issue_flags, ret);
600 ret = -EFAULT;
601 break;
602 }
603 }
604
605 if (done)
606 return done;
607 return ret;
608}
609
610int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
611{
612 struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
613 struct io_ring_ctx *ctx = req->ctx;
614 struct io_uring_rsrc_update2 up2;
615 int ret;
616
617 up2.offset = up->offset;
618 up2.data = up->arg;
619 up2.nr = 0;
620 up2.tags = 0;
621 up2.resv = 0;
622 up2.resv2 = 0;
623
624 if (up->offset == IORING_FILE_INDEX_ALLOC) {
625 ret = io_files_update_with_index_alloc(req, issue_flags);
626 } else {
627 io_ring_submit_lock(ctx, issue_flags);
628 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
629 &up2, up->nr_args);
630 io_ring_submit_unlock(ctx, issue_flags);
631 }
632
633 if (ret < 0)
634 req_set_fail(req);
635 io_req_set_res(req, ret, 0);
636 return IOU_OK;
637}
638
639int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
640{
641 struct io_ring_ctx *ctx = data->ctx;
642 struct io_rsrc_node *node = ctx->rsrc_node;
643 u64 *tag_slot = io_get_tag_slot(data, idx);
644
645 ctx->rsrc_node = io_rsrc_node_alloc(ctx);
646 if (unlikely(!ctx->rsrc_node)) {
647 ctx->rsrc_node = node;
648 return -ENOMEM;
649 }
650
651 node->item.rsrc = rsrc;
652 node->type = data->rsrc_type;
653 node->item.tag = *tag_slot;
654 *tag_slot = 0;
655 list_add_tail(&node->node, &ctx->rsrc_ref_list);
656 io_put_rsrc_node(ctx, node);
657 return 0;
658}
659
660void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
661{
662 int i;
663
664 for (i = 0; i < ctx->nr_user_files; i++) {
665 struct file *file = io_file_from_index(&ctx->file_table, i);
666
667 if (!file)
668 continue;
669 io_file_bitmap_clear(&ctx->file_table, i);
670 fput(file);
671 }
672
673 io_free_file_tables(&ctx->file_table);
674 io_file_table_set_alloc_range(ctx, 0, 0);
675 io_rsrc_data_free(ctx->file_data);
676 ctx->file_data = NULL;
677 ctx->nr_user_files = 0;
678}
679
680int io_sqe_files_unregister(struct io_ring_ctx *ctx)
681{
682 unsigned nr = ctx->nr_user_files;
683 int ret;
684
685 if (!ctx->file_data)
686 return -ENXIO;
687
688 /*
689 * Quiesce may unlock ->uring_lock, and while it's not held
690 * prevent new requests using the table.
691 */
692 ctx->nr_user_files = 0;
693 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
694 ctx->nr_user_files = nr;
695 if (!ret)
696 __io_sqe_files_unregister(ctx);
697 return ret;
698}
699
700int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
701 unsigned nr_args, u64 __user *tags)
702{
703 __s32 __user *fds = (__s32 __user *) arg;
704 struct file *file;
705 int fd, ret;
706 unsigned i;
707
708 if (ctx->file_data)
709 return -EBUSY;
710 if (!nr_args)
711 return -EINVAL;
712 if (nr_args > IORING_MAX_FIXED_FILES)
713 return -EMFILE;
714 if (nr_args > rlimit(RLIMIT_NOFILE))
715 return -EMFILE;
716 ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
717 &ctx->file_data);
718 if (ret)
719 return ret;
720
721 if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
722 io_rsrc_data_free(ctx->file_data);
723 ctx->file_data = NULL;
724 return -ENOMEM;
725 }
726
727 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
728 struct io_fixed_file *file_slot;
729
730 if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
731 ret = -EFAULT;
732 goto fail;
733 }
734 /* allow sparse sets */
735 if (!fds || fd == -1) {
736 ret = -EINVAL;
737 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
738 goto fail;
739 continue;
740 }
741
742 file = fget(fd);
743 ret = -EBADF;
744 if (unlikely(!file))
745 goto fail;
746
747 /*
748 * Don't allow io_uring instances to be registered.
749 */
750 if (io_is_uring_fops(file)) {
751 fput(file);
752 goto fail;
753 }
754 file_slot = io_fixed_file_slot(&ctx->file_table, i);
755 io_fixed_file_set(file_slot, file);
756 io_file_bitmap_set(&ctx->file_table, i);
757 }
758
759 /* default it to the whole table */
760 io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
761 return 0;
762fail:
763 __io_sqe_files_unregister(ctx);
764 return ret;
765}
766
767static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
768{
769 io_buffer_unmap(ctx, &prsrc->buf);
770 prsrc->buf = NULL;
771}
772
773void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
774{
775 unsigned int i;
776
777 for (i = 0; i < ctx->nr_user_bufs; i++)
778 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
779 kfree(ctx->user_bufs);
780 io_rsrc_data_free(ctx->buf_data);
781 ctx->user_bufs = NULL;
782 ctx->buf_data = NULL;
783 ctx->nr_user_bufs = 0;
784}
785
786int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
787{
788 unsigned nr = ctx->nr_user_bufs;
789 int ret;
790
791 if (!ctx->buf_data)
792 return -ENXIO;
793
794 /*
795 * Quiesce may unlock ->uring_lock, and while it's not held
796 * prevent new requests using the table.
797 */
798 ctx->nr_user_bufs = 0;
799 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
800 ctx->nr_user_bufs = nr;
801 if (!ret)
802 __io_sqe_buffers_unregister(ctx);
803 return ret;
804}
805
806/*
807 * Not super efficient, but this is just a registration time. And we do cache
808 * the last compound head, so generally we'll only do a full search if we don't
809 * match that one.
810 *
811 * We check if the given compound head page has already been accounted, to
812 * avoid double accounting it. This allows us to account the full size of the
813 * page, not just the constituent pages of a huge page.
814 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
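
/*
 * Worked example for the accounting above, assuming 4K base pages and a
 * 2MB huge page: a buffer covering all 512 constituent pages accounts the
 * full 2MB once (page_size(hpage) >> PAGE_SHIFT == 512) when the head is
 * first seen, and the remaining tail pages are skipped via *last_hpage. A
 * second buffer inside the same huge page accounts nothing extra, because
 * headpage_already_acct() finds the shared compound head.
 */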

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages = NULL;
	int ret;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;
	WARN_ON(!nr_pages);
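	/*
	 * Example with 4K pages: ubuf = 0x1800, len = 0x1000 covers bytes
	 * 0x1800..0x27ff, so start = 1, end = 3 and nr_pages = 2; an
	 * unaligned buffer can straddle one more page than len alone
	 * suggests.
	 */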

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	mmap_read_lock(current->mm);
	ret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages);
	mmap_read_unlock(current->mm);

	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are all bound to the folio, so dropping
			 * all but one reference here doesn't actually unpin
			 * them; the remaining reference is put by
			 * io_buffer_unmap(). Note, this needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}
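	/*
	 * Example, with 4K base pages: a 2MB buffer backed by a single 2MB
	 * huge page arrives here as 512 consecutive pages of one folio and
	 * collapses to nr_pages = 1, so the whole buffer fits in a single
	 * bvec entry below.
	 */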

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu) {
		/* don't leak the pins taken above */
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
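	/*
	 * Example: off = 0x800 and size = 0x1800 yield two bvecs: the first
	 * covers 0x800 bytes at offset 0x800 into page 0, the second the
	 * remaining 0x1000 bytes of page 1. Only the first and last bvec
	 * can be shorter than PAGE_SIZE.
	 */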
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
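	/* buffer indices travel in the SQE's u16 buf_index, hence < 64K */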

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}
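
/*
 * Illustrative userspace view (not part of this file): buffers reach the
 * function above through IORING_REGISTER_BUFFERS. With liburing, e.g.:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *
 * after which IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED can reference the
 * buffer by index, avoiding a pin/unpin of its pages on every request.
 */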

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;
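	/*
	 * Example: for a buffer registered with ubuf = 0x10000 and
	 * ubuf_end = 0x12000, buf_addr = 0x11000 with len = 0x800 passes
	 * (buf_end = 0x11800), while len = 0x1800 fails because buf_end
	 * would reach 0x12800.
	 */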

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), adjust the count and offset directly; that way
		 * we can skip the first segment, which may not be PAGE_SIZE
		 * aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large bvec
			 * entry and should always take this path. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 * iov_iter_bvec() above already set ->bvec and
			 * ->nr_segs correctly, so only the count and offset
			 * need adjusting here.
			 */
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);
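			/*
			 * Example with 4K pages: offset = 0x2330 and a
			 * 0x1000-byte first bvec leave offset = 0x1330, so
			 * seg_skip = 2 and iov_offset = 0x330: iteration
			 * resumes 0x330 bytes into the third bvec.
			 */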

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}