io_uring/kbuf.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/kernel.h>
  3#include <linux/errno.h>
  4#include <linux/fs.h>
  5#include <linux/file.h>
  6#include <linux/mm.h>
  7#include <linux/slab.h>
  8#include <linux/namei.h>
  9#include <linux/poll.h>
 10#include <linux/vmalloc.h>
 11#include <linux/io_uring.h>
 12
 13#include <uapi/linux/io_uring.h>
 14
 15#include "io_uring.h"
 16#include "opdef.h"
 17#include "kbuf.h"
 18#include "memmap.h"
 19
 20/* BIDs are addressed by a 16-bit field in a CQE */
 21#define MAX_BIDS_PER_BGID (1 << 16)
 22
 23struct kmem_cache *io_buf_cachep;
 24
 25struct io_provide_buf {
 26	struct file			*file;
 27	__u64				addr;
 28	__u32				len;
 29	__u32				bgid;
 30	__u32				nbufs;
 31	__u16				bid;
 32};
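For reference, these fields are decoded from the SQE by io_provide_buffers_prep() further down, with nbufs arriving in sqe->fd and bid in sqe->off. A minimal userspace sketch of the producing side, assuming liburing's io_uring_prep_provide_buffers() helper; the buffer count and size below are arbitrary and error handling is trimmed:

/* Illustrative only: provide 8 x 4KiB legacy buffers in buffer group 0,
 * starting at buffer ID 0. */
static int provide_legacy_buffers(struct io_uring *ring, void *pool)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, /*bgid*/ 0, /*bid*/ 0);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (!ret) {
		ret = cqe->res;		/* negative errno on failure */
		io_uring_cqe_seen(ring, cqe);
	}
	return ret;
}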
 33
 34static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 35							unsigned int bgid)
 36{
 37	lockdep_assert_held(&ctx->uring_lock);
 38
 39	return xa_load(&ctx->io_bl_xa, bgid);
 40}
 41
 42static int io_buffer_add_list(struct io_ring_ctx *ctx,
 43			      struct io_buffer_list *bl, unsigned int bgid)
 44{
 45	/*
 46	 * Store buffer group ID and finally mark the list as visible.
 47	 * The normal lookup doesn't care about the visibility as we're
 48	 * always under the ->uring_lock, but the RCU lookup from mmap does.
 49	 */
 50	bl->bgid = bgid;
 51	atomic_set(&bl->refs, 1);
 52	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 53}
 54
 55bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 56{
 57	struct io_ring_ctx *ctx = req->ctx;
 58	struct io_buffer_list *bl;
 59	struct io_buffer *buf;
 60
 61	io_ring_submit_lock(ctx, issue_flags);
 62
 63	buf = req->kbuf;
 64	bl = io_buffer_get_list(ctx, buf->bgid);
 65	list_add(&buf->list, &bl->buf_list);
 66	req->flags &= ~REQ_F_BUFFER_SELECTED;
 67	req->buf_index = buf->bgid;
 68
 69	io_ring_submit_unlock(ctx, issue_flags);
 70	return true;
 71}
 72
 73void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
 74{
 75	/*
 76	 * We can add this buffer back to two lists:
 77	 *
 78	 * 1) The io_buffers_cache list. This one is protected by the
 79	 *    ctx->uring_lock. If we already hold this lock, add back to this
 80	 *    list as we can grab it from issue as well.
 81	 * 2) The io_buffers_comp list. This one is protected by the
 82	 *    ctx->completion_lock.
 83	 *
 84	 * We migrate buffers from the comp_list to the issue cache list
 85	 * when we need one.
 86	 */
 87	if (issue_flags & IO_URING_F_UNLOCKED) {
 88		struct io_ring_ctx *ctx = req->ctx;
 89
 90		spin_lock(&ctx->completion_lock);
 91		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
 92		spin_unlock(&ctx->completion_lock);
 93	} else {
 94		lockdep_assert_held(&req->ctx->uring_lock);
 95
 96		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
 97	}
 98}
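On the completion side, the buffer ID that __io_put_kbuf() and the kbuf.h helpers fold into the CQE flags is what the application reads back. A small consumer-side sketch, assuming the uapi constants IORING_CQE_F_BUFFER and IORING_CQE_BUFFER_SHIFT:

/* Illustrative: recover which provided buffer a completion consumed. */
static int cqe_buffer_id(const struct io_uring_cqe *cqe)
{
	if (!(cqe->flags & IORING_CQE_F_BUFFER))
		return -1;	/* no provided buffer was used */
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}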
 99
100static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
101					      struct io_buffer_list *bl)
102{
103	if (!list_empty(&bl->buf_list)) {
104		struct io_buffer *kbuf;
105
106		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
107		list_del(&kbuf->list);
108		if (*len == 0 || *len > kbuf->len)
109			*len = kbuf->len;
110		if (list_empty(&bl->buf_list))
111			req->flags |= REQ_F_BL_EMPTY;
112		req->flags |= REQ_F_BUFFER_SELECTED;
113		req->kbuf = kbuf;
114		req->buf_index = kbuf->bid;
115		return u64_to_user_ptr(kbuf->addr);
116	}
117	return NULL;
118}
119
120static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
121				      struct io_buffer_list *bl,
122				      struct iovec *iov)
123{
124	void __user *buf;
125
126	buf = io_provided_buffer_select(req, len, bl);
127	if (unlikely(!buf))
128		return -ENOBUFS;
129
130	iov[0].iov_base = buf;
131	iov[0].iov_len = *len;
132	return 1;
133}
134
135static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
136					  struct io_buffer_list *bl,
137					  unsigned int issue_flags)
138{
139	struct io_uring_buf_ring *br = bl->buf_ring;
140	__u16 tail, head = bl->head;
141	struct io_uring_buf *buf;
142	void __user *ret;
143
144	tail = smp_load_acquire(&br->tail);
145	if (unlikely(tail == head))
146		return NULL;
147
148	if (head + 1 == tail)
149		req->flags |= REQ_F_BL_EMPTY;
150
151	buf = io_ring_head_to_buf(br, head, bl->mask);
152	if (*len == 0 || *len > buf->len)
153		*len = buf->len;
154	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
155	req->buf_list = bl;
156	req->buf_index = buf->bid;
157	ret = u64_to_user_ptr(buf->addr);
158
159	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
160		/*
161		 * If we came in unlocked, we have no choice but to consume the
162		 * buffer here, otherwise nothing ensures that the buffer won't
163		 * get used by others. This does mean it'll be pinned until the
164		 * IO completes, coming in unlocked means we're being called from
165		 * io-wq context and there may be further retries in async hybrid
166		 * mode. For the locked case, the caller must call commit when
167	 * the transfer completes (or if we get -EAGAIN and must poll or
168		 * retry).
169		 */
170		io_kbuf_commit(req, bl, *len, 1);
171		req->buf_list = NULL;
172	}
173	return ret;
174}
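The smp_load_acquire() of br->tail above pairs with the release store userspace performs when publishing fresh entries. A hedged sketch of that producer side, assuming liburing's io_uring_buf_ring_add()/io_uring_buf_ring_advance() helpers and a pre-allocated buffer pool:

/* Illustrative refill: hand nr buffers of buf_len bytes each to the kernel. */
static void refill_buf_ring(struct io_uring_buf_ring *br, unsigned int entries,
			    char *pool, size_t buf_len, unsigned int nr)
{
	int mask = io_uring_buf_ring_mask(entries);
	unsigned int i;

	for (i = 0; i < nr; i++)
		io_uring_buf_ring_add(br, pool + i * buf_len, buf_len,
				      /*bid*/ i, mask, /*buf_offset*/ i);
	/* publishes the new tail with a release store, matching the acquire
	 * load in io_ring_buffer_select() */
	io_uring_buf_ring_advance(br, nr);
}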
175
176void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
177			      unsigned int issue_flags)
178{
179	struct io_ring_ctx *ctx = req->ctx;
180	struct io_buffer_list *bl;
181	void __user *ret = NULL;
182
183	io_ring_submit_lock(req->ctx, issue_flags);
184
185	bl = io_buffer_get_list(ctx, req->buf_index);
186	if (likely(bl)) {
187		if (bl->flags & IOBL_BUF_RING)
188			ret = io_ring_buffer_select(req, len, bl, issue_flags);
189		else
190			ret = io_provided_buffer_select(req, len, bl);
191	}
192	io_ring_submit_unlock(req->ctx, issue_flags);
193	return ret;
194}
195
196/* cap it at a reasonable 256, will be one page even for 4K */
197#define PEEK_MAX_IMPORT		256
198
199static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
200				struct io_buffer_list *bl)
201{
202	struct io_uring_buf_ring *br = bl->buf_ring;
203	struct iovec *iov = arg->iovs;
204	int nr_iovs = arg->nr_iovs;
205	__u16 nr_avail, tail, head;
206	struct io_uring_buf *buf;
207
208	tail = smp_load_acquire(&br->tail);
209	head = bl->head;
210	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
211	if (unlikely(!nr_avail))
212		return -ENOBUFS;
213
214	buf = io_ring_head_to_buf(br, head, bl->mask);
215	if (arg->max_len) {
216		u32 len = READ_ONCE(buf->len);
217
218		if (unlikely(!len))
219			return -ENOBUFS;
220		/*
221		 * Limit incremental buffers to 1 segment. No point trying
222		 * to peek ahead and map more than we need, when the buffers
223		 * themselves should be large when setup with
224		 * IOU_PBUF_RING_INC.
225		 */
226		if (bl->flags & IOBL_INC) {
227			nr_avail = 1;
228		} else {
229			size_t needed;
230
231			needed = (arg->max_len + len - 1) / len;
232			needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
233			if (nr_avail > needed)
234				nr_avail = needed;
235		}
236	}
237
238	/*
239	 * only alloc a bigger array if we know we have data to map, eg not
240	 * a speculative peek operation.
241	 */
242	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
243		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
244		if (unlikely(!iov))
245			return -ENOMEM;
246		if (arg->mode & KBUF_MODE_FREE)
247			kfree(arg->iovs);
248		arg->iovs = iov;
249		nr_iovs = nr_avail;
250	} else if (nr_avail < nr_iovs) {
251		nr_iovs = nr_avail;
252	}
253
254	/* set it to max, if not set, so we can use it unconditionally */
255	if (!arg->max_len)
256		arg->max_len = INT_MAX;
257
258	req->buf_index = buf->bid;
259	do {
260		u32 len = buf->len;
261
262		/* truncate end piece, if needed, for non partial buffers */
263		if (len > arg->max_len) {
264			len = arg->max_len;
265			if (!(bl->flags & IOBL_INC))
266				buf->len = len;
267		}
268
269		iov->iov_base = u64_to_user_ptr(buf->addr);
270		iov->iov_len = len;
271		iov++;
272
273		arg->out_len += len;
274		arg->max_len -= len;
275		if (!arg->max_len)
276			break;
277
278		buf = io_ring_head_to_buf(br, ++head, bl->mask);
279	} while (--nr_iovs);
280
281	if (head == tail)
282		req->flags |= REQ_F_BL_EMPTY;
283
284	req->flags |= REQ_F_BUFFER_RING;
285	req->buf_list = bl;
286	return iov - arg->iovs;
287}
288
289int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
290		      unsigned int issue_flags)
291{
292	struct io_ring_ctx *ctx = req->ctx;
293	struct io_buffer_list *bl;
294	int ret = -ENOENT;
295
296	io_ring_submit_lock(ctx, issue_flags);
297	bl = io_buffer_get_list(ctx, req->buf_index);
298	if (unlikely(!bl))
299		goto out_unlock;
300
301	if (bl->flags & IOBL_BUF_RING) {
302		ret = io_ring_buffers_peek(req, arg, bl);
303		/*
304		 * Don't recycle these buffers if we need to go through poll.
305		 * Nobody else can use them anyway, and holding on to provided
306		 * buffers for a send/write operation would happen on the app
307		 * side anyway with normal buffers. Besides, we already
308		 * committed them, they cannot be put back in the queue.
309		 */
310		if (ret > 0) {
311			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
312			io_kbuf_commit(req, bl, arg->out_len, ret);
313		}
314	} else {
315		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
316	}
317out_unlock:
318	io_ring_submit_unlock(ctx, issue_flags);
319	return ret;
320}
321
322int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
323{
324	struct io_ring_ctx *ctx = req->ctx;
325	struct io_buffer_list *bl;
326	int ret;
327
328	lockdep_assert_held(&ctx->uring_lock);
329
330	bl = io_buffer_get_list(ctx, req->buf_index);
331	if (unlikely(!bl))
332		return -ENOENT;
333
334	if (bl->flags & IOBL_BUF_RING) {
335		ret = io_ring_buffers_peek(req, arg, bl);
336		if (ret > 0)
337			req->flags |= REQ_F_BUFFERS_COMMIT;
338		return ret;
339	}
340
341	/* don't support multiple buffer selections for legacy */
342	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
343}
344
345static int __io_remove_buffers(struct io_ring_ctx *ctx,
346			       struct io_buffer_list *bl, unsigned nbufs)
347{
348	unsigned i = 0;
349
350	/* shouldn't happen */
351	if (!nbufs)
352		return 0;
353
354	if (bl->flags & IOBL_BUF_RING) {
355		i = bl->buf_ring->tail - bl->head;
356		if (bl->buf_nr_pages) {
357			int j;
358
359			if (!(bl->flags & IOBL_MMAP)) {
360				for (j = 0; j < bl->buf_nr_pages; j++)
361					unpin_user_page(bl->buf_pages[j]);
362			}
363			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
364					&bl->buf_nr_pages, bl->flags & IOBL_MMAP);
365			bl->flags &= ~IOBL_MMAP;
366		}
367		/* make sure it's seen as empty */
368		INIT_LIST_HEAD(&bl->buf_list);
369		bl->flags &= ~IOBL_BUF_RING;
370		return i;
371	}
372
373	/* protects io_buffers_cache */
374	lockdep_assert_held(&ctx->uring_lock);
375
376	while (!list_empty(&bl->buf_list)) {
377		struct io_buffer *nxt;
378
379		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
380		list_move(&nxt->list, &ctx->io_buffers_cache);
381		if (++i == nbufs)
382			return i;
383		cond_resched();
384	}
385
386	return i;
387}
388
389void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
390{
391	if (atomic_dec_and_test(&bl->refs)) {
392		__io_remove_buffers(ctx, bl, -1U);
393		kfree_rcu(bl, rcu);
394	}
395}
396
397void io_destroy_buffers(struct io_ring_ctx *ctx)
398{
399	struct io_buffer_list *bl;
400	struct list_head *item, *tmp;
401	struct io_buffer *buf;
402	unsigned long index;
403
404	xa_for_each(&ctx->io_bl_xa, index, bl) {
405		xa_erase(&ctx->io_bl_xa, bl->bgid);
406		io_put_bl(ctx, bl);
407	}
408
409	/*
410	 * Move deferred locked entries to cache before pruning
411	 */
412	spin_lock(&ctx->completion_lock);
413	if (!list_empty(&ctx->io_buffers_comp))
414		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
415	spin_unlock(&ctx->completion_lock);
416
417	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
418		buf = list_entry(item, struct io_buffer, list);
419		kmem_cache_free(io_buf_cachep, buf);
420	}
421}
422
423static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
424{
425	xa_erase(&ctx->io_bl_xa, bl->bgid);
426	io_put_bl(ctx, bl);
427}
428
429int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
430{
431	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
432	u64 tmp;
433
434	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
435	    sqe->splice_fd_in)
436		return -EINVAL;
437
438	tmp = READ_ONCE(sqe->fd);
439	if (!tmp || tmp > MAX_BIDS_PER_BGID)
440		return -EINVAL;
441
442	memset(p, 0, sizeof(*p));
443	p->nbufs = tmp;
444	p->bgid = READ_ONCE(sqe->buf_group);
445	return 0;
446}
447
448int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
449{
450	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
451	struct io_ring_ctx *ctx = req->ctx;
452	struct io_buffer_list *bl;
453	int ret = 0;
454
455	io_ring_submit_lock(ctx, issue_flags);
456
457	ret = -ENOENT;
458	bl = io_buffer_get_list(ctx, p->bgid);
459	if (bl) {
460		ret = -EINVAL;
461		/* can't use provide/remove buffers command on mapped buffers */
462		if (!(bl->flags & IOBL_BUF_RING))
463			ret = __io_remove_buffers(ctx, bl, p->nbufs);
464	}
465	io_ring_submit_unlock(ctx, issue_flags);
466	if (ret < 0)
467		req_set_fail(req);
468	io_req_set_res(req, ret, 0);
469	return IOU_OK;
470}
471
472int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
473{
474	unsigned long size, tmp_check;
475	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
476	u64 tmp;
477
478	if (sqe->rw_flags || sqe->splice_fd_in)
479		return -EINVAL;
480
481	tmp = READ_ONCE(sqe->fd);
482	if (!tmp || tmp > MAX_BIDS_PER_BGID)
483		return -E2BIG;
484	p->nbufs = tmp;
485	p->addr = READ_ONCE(sqe->addr);
486	p->len = READ_ONCE(sqe->len);
487
488	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
489				&size))
490		return -EOVERFLOW;
491	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
492		return -EOVERFLOW;
493
494	size = (unsigned long)p->len * p->nbufs;
495	if (!access_ok(u64_to_user_ptr(p->addr), size))
496		return -EFAULT;
497
498	p->bgid = READ_ONCE(sqe->buf_group);
499	tmp = READ_ONCE(sqe->off);
500	if (tmp > USHRT_MAX)
501		return -E2BIG;
502	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
503		return -EINVAL;
504	p->bid = tmp;
505	return 0;
506}
507
508#define IO_BUFFER_ALLOC_BATCH 64
509
510static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
511{
512	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
513	int allocated;
514
515	/*
516	 * Completions that don't happen inline (eg not under uring_lock) will
517	 * add to ->io_buffers_comp. If we don't have any free buffers, check
518	 * the completion list and splice those entries first.
519	 */
520	if (!list_empty_careful(&ctx->io_buffers_comp)) {
521		spin_lock(&ctx->completion_lock);
522		if (!list_empty(&ctx->io_buffers_comp)) {
523			list_splice_init(&ctx->io_buffers_comp,
524						&ctx->io_buffers_cache);
525			spin_unlock(&ctx->completion_lock);
526			return 0;
527		}
528		spin_unlock(&ctx->completion_lock);
529	}
530
531	/*
532	 * No free buffers and no completion entries either. Allocate a new
533	 * batch of buffer entries and add those to our freelist.
534	 */
535
536	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
537					  ARRAY_SIZE(bufs), (void **) bufs);
538	if (unlikely(!allocated)) {
539		/*
540		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
541		 * retry single alloc to be on the safe side.
542		 */
543		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
544		if (!bufs[0])
545			return -ENOMEM;
546		allocated = 1;
547	}
548
549	while (allocated)
550		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);
551
552	return 0;
553}
554
555static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
556			  struct io_buffer_list *bl)
557{
558	struct io_buffer *buf;
559	u64 addr = pbuf->addr;
560	int i, bid = pbuf->bid;
561
562	for (i = 0; i < pbuf->nbufs; i++) {
563		if (list_empty(&ctx->io_buffers_cache) &&
564		    io_refill_buffer_cache(ctx))
565			break;
566		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
567					list);
568		list_move_tail(&buf->list, &bl->buf_list);
569		buf->addr = addr;
570		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
571		buf->bid = bid;
572		buf->bgid = pbuf->bgid;
573		addr += pbuf->len;
574		bid++;
575		cond_resched();
576	}
577
578	return i ? 0 : -ENOMEM;
579}
580
581int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
582{
583	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
584	struct io_ring_ctx *ctx = req->ctx;
585	struct io_buffer_list *bl;
586	int ret = 0;
587
588	io_ring_submit_lock(ctx, issue_flags);
589
590	bl = io_buffer_get_list(ctx, p->bgid);
591	if (unlikely(!bl)) {
592		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
593		if (!bl) {
594			ret = -ENOMEM;
595			goto err;
596		}
597		INIT_LIST_HEAD(&bl->buf_list);
598		ret = io_buffer_add_list(ctx, bl, p->bgid);
599		if (ret) {
600			/*
601			 * Doesn't need rcu free as it was never visible, but
602			 * let's keep it consistent throughout.
603			 */
604			kfree_rcu(bl, rcu);
605			goto err;
606		}
607	}
608	/* can't add buffers via this command for a mapped buffer ring */
609	if (bl->flags & IOBL_BUF_RING) {
610		ret = -EINVAL;
611		goto err;
612	}
613
614	ret = io_add_buffers(ctx, p, bl);
615err:
616	io_ring_submit_unlock(ctx, issue_flags);
617
618	if (ret < 0)
619		req_set_fail(req);
620	io_req_set_res(req, ret, 0);
621	return IOU_OK;
622}
623
624static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
625			    struct io_buffer_list *bl)
626{
627	struct io_uring_buf_ring *br = NULL;
628	struct page **pages;
629	int nr_pages, ret;
630
631	pages = io_pin_pages(reg->ring_addr,
632			     flex_array_size(br, bufs, reg->ring_entries),
633			     &nr_pages);
634	if (IS_ERR(pages))
635		return PTR_ERR(pages);
636
637	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
638	if (!br) {
639		ret = -ENOMEM;
640		goto error_unpin;
641	}
642
643#ifdef SHM_COLOUR
644	/*
645	 * On platforms that have specific aliasing requirements, SHM_COLOUR
646	 * is set and we must guarantee that the kernel and user side align
647	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
648	 * the application mmap's the provided ring buffer. Fail the request
649	 * if we, by chance, don't end up with aligned addresses. The app
650	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
651	 * this transparently.
652	 */
653	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
654		ret = -EINVAL;
655		goto error_unpin;
656	}
657#endif
658	bl->buf_pages = pages;
659	bl->buf_nr_pages = nr_pages;
660	bl->buf_ring = br;
661	bl->flags |= IOBL_BUF_RING;
662	bl->flags &= ~IOBL_MMAP;
663	return 0;
664error_unpin:
665	unpin_user_pages(pages, nr_pages);
666	kvfree(pages);
667	vunmap(br);
668	return ret;
669}
670
671static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
672			      struct io_uring_buf_reg *reg,
673			      struct io_buffer_list *bl)
674{
675	size_t ring_size;
676
677	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
678
679	bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
680	if (IS_ERR(bl->buf_ring)) {
681		bl->buf_ring = NULL;
682		return -ENOMEM;
683	}
684
685	bl->flags |= (IOBL_BUF_RING | IOBL_MMAP);
686	return 0;
687}
688
689int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
690{
691	struct io_uring_buf_reg reg;
692	struct io_buffer_list *bl, *free_bl = NULL;
693	int ret;
694
695	lockdep_assert_held(&ctx->uring_lock);
696
697	if (copy_from_user(&reg, arg, sizeof(reg)))
698		return -EFAULT;
699
700	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
701		return -EINVAL;
702	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
703		return -EINVAL;
704	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
705		if (!reg.ring_addr)
706			return -EFAULT;
707		if (reg.ring_addr & ~PAGE_MASK)
708			return -EINVAL;
709	} else {
710		if (reg.ring_addr)
711			return -EINVAL;
712	}
713
714	if (!is_power_of_2(reg.ring_entries))
715		return -EINVAL;
716
717	/* cannot disambiguate full vs empty due to head/tail size */
718	if (reg.ring_entries >= 65536)
719		return -EINVAL;
720
721	bl = io_buffer_get_list(ctx, reg.bgid);
722	if (bl) {
723		/* if mapped buffer ring OR classic exists, don't allow */
724		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
725			return -EEXIST;
726		io_destroy_bl(ctx, bl);
727	}
728
729	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
730	if (!bl)
731		return -ENOMEM;
732
733	if (!(reg.flags & IOU_PBUF_RING_MMAP))
734		ret = io_pin_pbuf_ring(&reg, bl);
735	else
736		ret = io_alloc_pbuf_ring(ctx, &reg, bl);
737
738	if (!ret) {
739		bl->nr_entries = reg.ring_entries;
740		bl->mask = reg.ring_entries - 1;
741		if (reg.flags & IOU_PBUF_RING_INC)
742			bl->flags |= IOBL_INC;
743
744		io_buffer_add_list(ctx, bl, reg.bgid);
745		return 0;
746	}
747
748	kfree_rcu(free_bl, rcu);
749	return ret;
750}
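For context, a minimal sketch of the userspace registration this function services, covering the application-allocated case (no IOU_PBUF_RING_MMAP); it assumes liburing's io_uring_register_buf_ring() and io_uring_buf_ring_init() wrappers and a 4KiB page size:

/* Illustrative: allocate and register a buffer ring for group bgid.
 * entries must be a power of two below 65536 and the ring page aligned,
 * per the checks in io_register_pbuf_ring(). */
static struct io_uring_buf_ring *setup_app_buf_ring(struct io_uring *ring,
						    unsigned int entries, int bgid)
{
	struct io_uring_buf_reg reg = { };
	struct io_uring_buf_ring *br;

	if (posix_memalign((void **)&br, 4096,
			   entries * sizeof(struct io_uring_buf)))
		return NULL;

	reg.ring_addr = (unsigned long)br;
	reg.ring_entries = entries;
	reg.bgid = bgid;
	if (io_uring_register_buf_ring(ring, &reg, 0) < 0) {
		free(br);
		return NULL;
	}
	io_uring_buf_ring_init(br);	/* zero head/tail before first use */
	return br;
}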
751
752int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
753{
754	struct io_uring_buf_reg reg;
755	struct io_buffer_list *bl;
756
757	lockdep_assert_held(&ctx->uring_lock);
758
759	if (copy_from_user(&reg, arg, sizeof(reg)))
760		return -EFAULT;
761	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
762		return -EINVAL;
763	if (reg.flags)
764		return -EINVAL;
765
766	bl = io_buffer_get_list(ctx, reg.bgid);
767	if (!bl)
768		return -ENOENT;
769	if (!(bl->flags & IOBL_BUF_RING))
770		return -EINVAL;
771
772	xa_erase(&ctx->io_bl_xa, bl->bgid);
773	io_put_bl(ctx, bl);
774	return 0;
775}
776
777int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
778{
779	struct io_uring_buf_status buf_status;
780	struct io_buffer_list *bl;
781	int i;
782
783	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
784		return -EFAULT;
785
786	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
787		if (buf_status.resv[i])
788			return -EINVAL;
789
790	bl = io_buffer_get_list(ctx, buf_status.buf_group);
791	if (!bl)
792		return -ENOENT;
793	if (!(bl->flags & IOBL_BUF_RING))
794		return -EINVAL;
795
796	buf_status.head = bl->head;
797	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
798		return -EFAULT;
799
800	return 0;
801}
802
803struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
804				      unsigned long bgid)
805{
806	struct io_buffer_list *bl;
807	bool ret;
808
809	/*
810	 * We have to be a bit careful here - we're inside mmap and cannot grab
811	 * the uring_lock. This means the buffer_list could be simultaneously
812	 * going away, if someone is trying to be sneaky. Look it up under rcu
813	 * so we know it's not going away, and attempt to grab a reference to
814	 * it. If the ref is already zero, then fail the mapping. If successful,
815	 * the caller will call io_put_bl() to drop the the reference at at the
816	 * it. If the ref is already zero, then fail the mapping. If successful,
817	 * the caller will call io_put_bl() to drop the reference at the
818	 * necessary vma references.
819	 */
820	rcu_read_lock();
821	bl = xa_load(&ctx->io_bl_xa, bgid);
822	/* must be a mmap'able buffer ring and have pages */
823	ret = false;
824	if (bl && bl->flags & IOBL_MMAP)
825		ret = atomic_inc_not_zero(&bl->refs);
826	rcu_read_unlock();
827
828	if (ret)
829		return bl;
830
831	return ERR_PTR(-EINVAL);
832}
833
834int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
835{
836	struct io_ring_ctx *ctx = file->private_data;
837	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
838	struct io_buffer_list *bl;
839	int bgid, ret;
840
841	bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
842	bl = io_pbuf_get_bl(ctx, bgid);
843	if (IS_ERR(bl))
844		return PTR_ERR(bl);
845
846	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
847	io_put_bl(ctx, bl);
848	return ret;
849}
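When a ring is registered with IOU_PBUF_RING_MMAP, the kernel owns the memory (io_alloc_pbuf_ring()) and the application obtains it through the mmap path above. A sketch of that mapping, assuming <sys/mman.h> and the uapi IORING_OFF_PBUF_RING / IORING_OFF_PBUF_SHIFT constants:

/* Illustrative: map the kernel-allocated buffer ring for group bgid;
 * io_pbuf_mmap() decodes bgid back out of this file offset. */
static struct io_uring_buf_ring *map_kernel_buf_ring(int ring_fd,
						     unsigned int entries, int bgid)
{
	unsigned long long off = IORING_OFF_PBUF_RING |
				 ((unsigned long long)bgid << IORING_OFF_PBUF_SHIFT);
	void *p = mmap(NULL, entries * sizeof(struct io_uring_buf),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       ring_fd, off);

	return p == MAP_FAILED ? NULL : p;
}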
io_uring/kbuf.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/kernel.h>
  3#include <linux/errno.h>
  4#include <linux/fs.h>
  5#include <linux/file.h>
  6#include <linux/mm.h>
  7#include <linux/slab.h>
  8#include <linux/namei.h>
  9#include <linux/poll.h>
 10#include <linux/io_uring.h>
 11
 12#include <uapi/linux/io_uring.h>
 13
 14#include "io_uring.h"
 15#include "opdef.h"
 16#include "kbuf.h"
 17
 18#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
 19
 20#define BGID_ARRAY	64
 21
 22/* BIDs are addressed by a 16-bit field in a CQE */
 23#define MAX_BIDS_PER_BGID (1 << 16)
 24
 25struct kmem_cache *io_buf_cachep;
 26
 27struct io_provide_buf {
 28	struct file			*file;
 29	__u64				addr;
 30	__u32				len;
 31	__u32				bgid;
 32	__u32				nbufs;
 33	__u16				bid;
 34};
 35
 36struct io_buf_free {
 37	struct hlist_node		list;
 38	void				*mem;
 39	size_t				size;
 40	int				inuse;
 41};
 42
 43static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
 44						   struct io_buffer_list *bl,
 45						   unsigned int bgid)
 46{
 47	if (bl && bgid < BGID_ARRAY)
 48		return &bl[bgid];
 49
 50	return xa_load(&ctx->io_bl_xa, bgid);
 51}
 52
 53static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 54							unsigned int bgid)
 55{
 56	lockdep_assert_held(&ctx->uring_lock);
 57
 58	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
 59}
 60
 61static int io_buffer_add_list(struct io_ring_ctx *ctx,
 62			      struct io_buffer_list *bl, unsigned int bgid)
 63{
 64	/*
 65	 * Store buffer group ID and finally mark the list as visible.
 66	 * The normal lookup doesn't care about the visibility as we're
 67	 * always under the ->uring_lock, but the RCU lookup from mmap does.
 68	 */
 69	bl->bgid = bgid;
 70	smp_store_release(&bl->is_ready, 1);
 71
 72	if (bgid < BGID_ARRAY)
 73		return 0;
 74
 75	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 76}
 77
 78bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 79{
 80	struct io_ring_ctx *ctx = req->ctx;
 81	struct io_buffer_list *bl;
 82	struct io_buffer *buf;
 83
 84	/*
 85	 * For legacy provided buffer mode, don't recycle if we already did
 86	 * IO to this buffer. For ring-mapped provided buffer mode, we should
 87	 * increment ring->head to explicitly monopolize the buffer to avoid
 88	 * multiple use.
 89	 */
 90	if (req->flags & REQ_F_PARTIAL_IO)
 91		return false;
 92
 93	io_ring_submit_lock(ctx, issue_flags);
 94
 95	buf = req->kbuf;
 96	bl = io_buffer_get_list(ctx, buf->bgid);
 97	list_add(&buf->list, &bl->buf_list);
 98	req->flags &= ~REQ_F_BUFFER_SELECTED;
 99	req->buf_index = buf->bgid;
100
101	io_ring_submit_unlock(ctx, issue_flags);
102	return true;
103}
104
105unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
106{
107	unsigned int cflags;
108
109	/*
110	 * We can add this buffer back to two lists:
111	 *
112	 * 1) The io_buffers_cache list. This one is protected by the
113	 *    ctx->uring_lock. If we already hold this lock, add back to this
114	 *    list as we can grab it from issue as well.
115	 * 2) The io_buffers_comp list. This one is protected by the
116	 *    ctx->completion_lock.
117	 *
118	 * We migrate buffers from the comp_list to the issue cache list
119	 * when we need one.
120	 */
121	if (req->flags & REQ_F_BUFFER_RING) {
122		/* no buffers to recycle for this case */
123		cflags = __io_put_kbuf_list(req, NULL);
124	} else if (issue_flags & IO_URING_F_UNLOCKED) {
125		struct io_ring_ctx *ctx = req->ctx;
126
127		spin_lock(&ctx->completion_lock);
128		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
129		spin_unlock(&ctx->completion_lock);
130	} else {
131		lockdep_assert_held(&req->ctx->uring_lock);
132
133		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
134	}
135	return cflags;
136}
137
138static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
139					      struct io_buffer_list *bl)
140{
141	if (!list_empty(&bl->buf_list)) {
142		struct io_buffer *kbuf;
143
144		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
145		list_del(&kbuf->list);
146		if (*len == 0 || *len > kbuf->len)
147			*len = kbuf->len;
148		req->flags |= REQ_F_BUFFER_SELECTED;
149		req->kbuf = kbuf;
150		req->buf_index = kbuf->bid;
151		return u64_to_user_ptr(kbuf->addr);
152	}
153	return NULL;
154}
155
156static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
157					  struct io_buffer_list *bl,
158					  unsigned int issue_flags)
159{
160	struct io_uring_buf_ring *br = bl->buf_ring;
161	struct io_uring_buf *buf;
162	__u16 head = bl->head;
163
164	if (unlikely(smp_load_acquire(&br->tail) == head))
165		return NULL;
166
167	head &= bl->mask;
168	/* mmaped buffers are always contig */
169	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
170		buf = &br->bufs[head];
171	} else {
172		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
173		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
174		buf = page_address(bl->buf_pages[index]);
175		buf += off;
176	}
177	if (*len == 0 || *len > buf->len)
178		*len = buf->len;
179	req->flags |= REQ_F_BUFFER_RING;
180	req->buf_list = bl;
181	req->buf_index = buf->bid;
182
183	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
184		/*
185		 * If we came in unlocked, we have no choice but to consume the
186		 * buffer here, otherwise nothing ensures that the buffer won't
187		 * get used by others. This does mean it'll be pinned until the
188		 * IO completes, coming in unlocked means we're being called from
189		 * io-wq context and there may be further retries in async hybrid
190		 * mode. For the locked case, the caller must call commit when
191	 * the transfer completes (or if we get -EAGAIN and must poll or
192		 * retry).
193		 */
194		req->buf_list = NULL;
195		bl->head++;
196	}
197	return u64_to_user_ptr(buf->addr);
198}
199
200void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
201			      unsigned int issue_flags)
202{
203	struct io_ring_ctx *ctx = req->ctx;
204	struct io_buffer_list *bl;
205	void __user *ret = NULL;
206
207	io_ring_submit_lock(req->ctx, issue_flags);
208
209	bl = io_buffer_get_list(ctx, req->buf_index);
210	if (likely(bl)) {
211		if (bl->is_mapped)
212			ret = io_ring_buffer_select(req, len, bl, issue_flags);
213		else
214			ret = io_provided_buffer_select(req, len, bl);
215	}
216	io_ring_submit_unlock(req->ctx, issue_flags);
217	return ret;
218}
219
220static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
221{
222	struct io_buffer_list *bl;
223	int i;
224
225	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
226	if (!bl)
227		return -ENOMEM;
228
229	for (i = 0; i < BGID_ARRAY; i++) {
230		INIT_LIST_HEAD(&bl[i].buf_list);
231		bl[i].bgid = i;
232	}
233
234	smp_store_release(&ctx->io_bl, bl);
235	return 0;
236}
237
238/*
239 * Mark the given mapped range as free for reuse
240 */
241static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
242{
243	struct io_buf_free *ibf;
244
245	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
246		if (bl->buf_ring == ibf->mem) {
247			ibf->inuse = 0;
248			return;
249		}
250	}
251
252	/* can't happen... */
253	WARN_ON_ONCE(1);
254}
255
256static int __io_remove_buffers(struct io_ring_ctx *ctx,
257			       struct io_buffer_list *bl, unsigned nbufs)
258{
259	unsigned i = 0;
260
261	/* shouldn't happen */
262	if (!nbufs)
263		return 0;
264
265	if (bl->is_mapped) {
266		i = bl->buf_ring->tail - bl->head;
267		if (bl->is_mmap) {
268			/*
269			 * io_kbuf_list_free() will free the page(s) at
270			 * ->release() time.
271			 */
272			io_kbuf_mark_free(ctx, bl);
273			bl->buf_ring = NULL;
274			bl->is_mmap = 0;
275		} else if (bl->buf_nr_pages) {
276			int j;
277
278			for (j = 0; j < bl->buf_nr_pages; j++)
279				unpin_user_page(bl->buf_pages[j]);
280			kvfree(bl->buf_pages);
281			bl->buf_pages = NULL;
282			bl->buf_nr_pages = 0;
283		}
284		/* make sure it's seen as empty */
285		INIT_LIST_HEAD(&bl->buf_list);
286		bl->is_mapped = 0;
287		return i;
288	}
289
290	/* protects io_buffers_cache */
291	lockdep_assert_held(&ctx->uring_lock);
292
293	while (!list_empty(&bl->buf_list)) {
294		struct io_buffer *nxt;
295
296		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
297		list_move(&nxt->list, &ctx->io_buffers_cache);
298		if (++i == nbufs)
299			return i;
300		cond_resched();
301	}
302
303	return i;
304}
305
306void io_destroy_buffers(struct io_ring_ctx *ctx)
307{
308	struct io_buffer_list *bl;
309	struct list_head *item, *tmp;
310	struct io_buffer *buf;
311	unsigned long index;
312	int i;
313
314	for (i = 0; i < BGID_ARRAY; i++) {
315		if (!ctx->io_bl)
316			break;
317		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
318	}
319
320	xa_for_each(&ctx->io_bl_xa, index, bl) {
321		xa_erase(&ctx->io_bl_xa, bl->bgid);
322		__io_remove_buffers(ctx, bl, -1U);
323		kfree_rcu(bl, rcu);
324	}
325
326	/*
327	 * Move deferred locked entries to cache before pruning
328	 */
329	spin_lock(&ctx->completion_lock);
330	if (!list_empty(&ctx->io_buffers_comp))
331		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
332	spin_unlock(&ctx->completion_lock);
333
334	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
335		buf = list_entry(item, struct io_buffer, list);
336		kmem_cache_free(io_buf_cachep, buf);
337	}
338}
339
340int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
341{
342	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
343	u64 tmp;
344
345	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
346	    sqe->splice_fd_in)
347		return -EINVAL;
348
349	tmp = READ_ONCE(sqe->fd);
350	if (!tmp || tmp > MAX_BIDS_PER_BGID)
351		return -EINVAL;
352
353	memset(p, 0, sizeof(*p));
354	p->nbufs = tmp;
355	p->bgid = READ_ONCE(sqe->buf_group);
356	return 0;
357}
358
359int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
360{
361	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
362	struct io_ring_ctx *ctx = req->ctx;
363	struct io_buffer_list *bl;
364	int ret = 0;
365
366	io_ring_submit_lock(ctx, issue_flags);
367
368	ret = -ENOENT;
369	bl = io_buffer_get_list(ctx, p->bgid);
370	if (bl) {
371		ret = -EINVAL;
372		/* can't use provide/remove buffers command on mapped buffers */
373		if (!bl->is_mapped)
374			ret = __io_remove_buffers(ctx, bl, p->nbufs);
375	}
376	io_ring_submit_unlock(ctx, issue_flags);
377	if (ret < 0)
378		req_set_fail(req);
379	io_req_set_res(req, ret, 0);
380	return IOU_OK;
381}
382
383int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
384{
385	unsigned long size, tmp_check;
386	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
387	u64 tmp;
388
389	if (sqe->rw_flags || sqe->splice_fd_in)
390		return -EINVAL;
391
392	tmp = READ_ONCE(sqe->fd);
393	if (!tmp || tmp > MAX_BIDS_PER_BGID)
394		return -E2BIG;
395	p->nbufs = tmp;
396	p->addr = READ_ONCE(sqe->addr);
397	p->len = READ_ONCE(sqe->len);
398
399	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
400				&size))
401		return -EOVERFLOW;
402	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
403		return -EOVERFLOW;
404
405	size = (unsigned long)p->len * p->nbufs;
406	if (!access_ok(u64_to_user_ptr(p->addr), size))
407		return -EFAULT;
408
409	p->bgid = READ_ONCE(sqe->buf_group);
410	tmp = READ_ONCE(sqe->off);
411	if (tmp > USHRT_MAX)
412		return -E2BIG;
413	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
414		return -EINVAL;
415	p->bid = tmp;
416	return 0;
417}
418
419#define IO_BUFFER_ALLOC_BATCH 64
420
421static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
422{
423	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
424	int allocated;
425
426	/*
427	 * Completions that don't happen inline (eg not under uring_lock) will
428	 * add to ->io_buffers_comp. If we don't have any free buffers, check
429	 * the completion list and splice those entries first.
430	 */
431	if (!list_empty_careful(&ctx->io_buffers_comp)) {
432		spin_lock(&ctx->completion_lock);
433		if (!list_empty(&ctx->io_buffers_comp)) {
434			list_splice_init(&ctx->io_buffers_comp,
435						&ctx->io_buffers_cache);
436			spin_unlock(&ctx->completion_lock);
437			return 0;
438		}
439		spin_unlock(&ctx->completion_lock);
440	}
441
442	/*
443	 * No free buffers and no completion entries either. Allocate a new
444	 * batch of buffer entries and add those to our freelist.
445	 */
446
447	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
448					  ARRAY_SIZE(bufs), (void **) bufs);
449	if (unlikely(!allocated)) {
450		/*
451		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
452		 * retry single alloc to be on the safe side.
453		 */
454		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
455		if (!bufs[0])
456			return -ENOMEM;
457		allocated = 1;
458	}
459
460	while (allocated)
461		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);
462
463	return 0;
464}
465
466static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
467			  struct io_buffer_list *bl)
468{
469	struct io_buffer *buf;
470	u64 addr = pbuf->addr;
471	int i, bid = pbuf->bid;
472
473	for (i = 0; i < pbuf->nbufs; i++) {
474		if (list_empty(&ctx->io_buffers_cache) &&
475		    io_refill_buffer_cache(ctx))
476			break;
477		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
478					list);
479		list_move_tail(&buf->list, &bl->buf_list);
480		buf->addr = addr;
481		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
482		buf->bid = bid;
483		buf->bgid = pbuf->bgid;
484		addr += pbuf->len;
485		bid++;
486		cond_resched();
487	}
488
489	return i ? 0 : -ENOMEM;
490}
491
492int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
493{
494	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
495	struct io_ring_ctx *ctx = req->ctx;
496	struct io_buffer_list *bl;
497	int ret = 0;
498
499	io_ring_submit_lock(ctx, issue_flags);
500
501	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
502		ret = io_init_bl_list(ctx);
503		if (ret)
504			goto err;
505	}
506
507	bl = io_buffer_get_list(ctx, p->bgid);
508	if (unlikely(!bl)) {
509		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
510		if (!bl) {
511			ret = -ENOMEM;
512			goto err;
513		}
514		INIT_LIST_HEAD(&bl->buf_list);
515		ret = io_buffer_add_list(ctx, bl, p->bgid);
516		if (ret) {
517			/*
518			 * Doesn't need rcu free as it was never visible, but
519			 * let's keep it consistent throughout. Also can't
520			 * be a lower indexed array group, as adding one
521			 * where lookup failed cannot happen.
522			 */
523			if (p->bgid >= BGID_ARRAY)
524				kfree_rcu(bl, rcu);
525			else
526				WARN_ON_ONCE(1);
527			goto err;
528		}
529	}
530	/* can't add buffers via this command for a mapped buffer ring */
531	if (bl->is_mapped) {
532		ret = -EINVAL;
533		goto err;
534	}
535
536	ret = io_add_buffers(ctx, p, bl);
537err:
538	io_ring_submit_unlock(ctx, issue_flags);
539
540	if (ret < 0)
541		req_set_fail(req);
542	io_req_set_res(req, ret, 0);
543	return IOU_OK;
544}
545
546static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
547			    struct io_buffer_list *bl)
548{
549	struct io_uring_buf_ring *br;
550	struct page **pages;
551	int i, nr_pages;
552
553	pages = io_pin_pages(reg->ring_addr,
554			     flex_array_size(br, bufs, reg->ring_entries),
555			     &nr_pages);
556	if (IS_ERR(pages))
557		return PTR_ERR(pages);
558
559	/*
560	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
561	 * which then need to be mapped. We could support that, but it'd
562	 * complicate the code and slow down the common cases quite a bit.
563	 * So just error out, returning -EINVAL just like we did on kernels
564	 * that didn't support mapped buffer rings.
565	 */
566	for (i = 0; i < nr_pages; i++)
567		if (PageHighMem(pages[i]))
568			goto error_unpin;
569
570	br = page_address(pages[0]);
571#ifdef SHM_COLOUR
572	/*
573	 * On platforms that have specific aliasing requirements, SHM_COLOUR
574	 * is set and we must guarantee that the kernel and user side align
575	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
576	 * the application mmap's the provided ring buffer. Fail the request
577	 * if we, by chance, don't end up with aligned addresses. The app
578	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
579	 * this transparently.
580	 */
581	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
582		goto error_unpin;
583#endif
584	bl->buf_pages = pages;
585	bl->buf_nr_pages = nr_pages;
586	bl->buf_ring = br;
587	bl->is_mapped = 1;
588	bl->is_mmap = 0;
589	return 0;
590error_unpin:
591	for (i = 0; i < nr_pages; i++)
592		unpin_user_page(pages[i]);
593	kvfree(pages);
594	return -EINVAL;
595}
596
597/*
598 * See if we have a suitable region that we can reuse, rather than allocate
599 * both a new io_buf_free and mem region again. We leave it on the list as
600 * even a reused entry will need freeing at ring release.
601 */
602static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
603						    size_t ring_size)
604{
605	struct io_buf_free *ibf, *best = NULL;
606	size_t best_dist;
607
608	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
609		size_t dist;
610
611		if (ibf->inuse || ibf->size < ring_size)
612			continue;
613		dist = ibf->size - ring_size;
614		if (!best || dist < best_dist) {
615			best = ibf;
616			if (!dist)
617				break;
618			best_dist = dist;
619		}
620	}
621
622	return best;
623}
624
625static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
626			      struct io_uring_buf_reg *reg,
627			      struct io_buffer_list *bl)
628{
629	struct io_buf_free *ibf;
630	size_t ring_size;
631	void *ptr;
632
633	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
634
635	/* Reuse existing entry, if we can */
636	ibf = io_lookup_buf_free_entry(ctx, ring_size);
637	if (!ibf) {
638		ptr = io_mem_alloc(ring_size);
639		if (IS_ERR(ptr))
640			return PTR_ERR(ptr);
641
642		/* Allocate and store deferred free entry */
643		ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
644		if (!ibf) {
645			io_mem_free(ptr);
646			return -ENOMEM;
647		}
648		ibf->mem = ptr;
649		ibf->size = ring_size;
650		hlist_add_head(&ibf->list, &ctx->io_buf_list);
651	}
652	ibf->inuse = 1;
653	bl->buf_ring = ibf->mem;
654	bl->is_mapped = 1;
655	bl->is_mmap = 1;
656	return 0;
657}
658
659int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
660{
661	struct io_uring_buf_reg reg;
662	struct io_buffer_list *bl, *free_bl = NULL;
663	int ret;
664
665	lockdep_assert_held(&ctx->uring_lock);
666
667	if (copy_from_user(&reg, arg, sizeof(reg)))
668		return -EFAULT;
669
670	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
671		return -EINVAL;
672	if (reg.flags & ~IOU_PBUF_RING_MMAP)
673		return -EINVAL;
674	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
675		if (!reg.ring_addr)
676			return -EFAULT;
677		if (reg.ring_addr & ~PAGE_MASK)
678			return -EINVAL;
679	} else {
680		if (reg.ring_addr)
681			return -EINVAL;
682	}
683
684	if (!is_power_of_2(reg.ring_entries))
685		return -EINVAL;
686
687	/* cannot disambiguate full vs empty due to head/tail size */
688	if (reg.ring_entries >= 65536)
689		return -EINVAL;
690
691	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
692		int ret = io_init_bl_list(ctx);
693		if (ret)
694			return ret;
695	}
696
697	bl = io_buffer_get_list(ctx, reg.bgid);
698	if (bl) {
699		/* if mapped buffer ring OR classic exists, don't allow */
700		if (bl->is_mapped || !list_empty(&bl->buf_list))
701			return -EEXIST;
702	} else {
703		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
704		if (!bl)
705			return -ENOMEM;
706	}
707
708	if (!(reg.flags & IOU_PBUF_RING_MMAP))
709		ret = io_pin_pbuf_ring(&reg, bl);
710	else
711		ret = io_alloc_pbuf_ring(ctx, &reg, bl);
712
713	if (!ret) {
714		bl->nr_entries = reg.ring_entries;
715		bl->mask = reg.ring_entries - 1;
716
717		io_buffer_add_list(ctx, bl, reg.bgid);
718		return 0;
719	}
720
721	kfree_rcu(free_bl, rcu);
722	return ret;
723}
724
725int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
726{
727	struct io_uring_buf_reg reg;
728	struct io_buffer_list *bl;
729
730	lockdep_assert_held(&ctx->uring_lock);
731
732	if (copy_from_user(&reg, arg, sizeof(reg)))
733		return -EFAULT;
734	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
735		return -EINVAL;
736	if (reg.flags)
737		return -EINVAL;
738
739	bl = io_buffer_get_list(ctx, reg.bgid);
740	if (!bl)
741		return -ENOENT;
742	if (!bl->is_mapped)
743		return -EINVAL;
744
745	__io_remove_buffers(ctx, bl, -1U);
746	if (bl->bgid >= BGID_ARRAY) {
747		xa_erase(&ctx->io_bl_xa, bl->bgid);
748		kfree_rcu(bl, rcu);
749	}
750	return 0;
751}
752
753int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
754{
755	struct io_uring_buf_status buf_status;
756	struct io_buffer_list *bl;
757	int i;
758
759	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
760		return -EFAULT;
761
762	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
763		if (buf_status.resv[i])
764			return -EINVAL;
765
766	bl = io_buffer_get_list(ctx, buf_status.buf_group);
767	if (!bl)
768		return -ENOENT;
769	if (!bl->is_mapped)
770		return -EINVAL;
771
772	buf_status.head = bl->head;
773	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
774		return -EFAULT;
775
776	return 0;
777}
778
779void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
780{
781	struct io_buffer_list *bl;
782
783	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
784
785	if (!bl || !bl->is_mmap)
786		return NULL;
787	/*
788	 * Ensure the list is fully setup. Only strictly needed for RCU lookup
789	 * via mmap, and in that case only for the array indexed groups. For
790	 * the xarray lookups, it's either visible and ready, or not at all.
791	 */
792	if (!smp_load_acquire(&bl->is_ready))
793		return NULL;
794
795	return bl->buf_ring;
796}
797
798/*
799 * Called at or after ->release(), free the mmap'ed buffers that we used
800 * for memory mapped provided buffer rings.
801 */
802void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
803{
804	struct io_buf_free *ibf;
805	struct hlist_node *tmp;
806
807	hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
808		hlist_del(&ibf->list);
809		io_mem_free(ibf->mem);
810		kfree(ibf);
811	}
812}
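Teardown from userspace mirrors the two kernel paths above: ring-mapped groups go through IORING_UNREGISTER_PBUF_RING (io_unregister_pbuf_ring()), legacy groups through IORING_OP_REMOVE_BUFFERS (io_remove_buffers()). A minimal sketch, assuming liburing's wrappers:

/* Illustrative: drop a ring-mapped provided buffer group. */
static int drop_buf_ring_group(struct io_uring *ring, int bgid)
{
	return io_uring_unregister_buf_ring(ring, bgid);
}

/* Illustrative: queue removal of nr buffers from a legacy group. */
static void drop_legacy_buffers(struct io_uring *ring, int nr, int bgid)
{
	io_uring_prep_remove_buffers(io_uring_get_sqe(ring), nr, bgid);
	io_uring_submit(ring);
}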