v6.13.7
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* ring mapped provided buffers, but mmap'ed by application */
	IOBL_MMAP	= 2,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 4,

};
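
/*
 * Illustrative sketch, not part of this header: the IOBL_* values are
 * individual bits in io_buffer_list.flags below, tested one at a time,
 * as io_kbuf_commit() does further down:
 *
 *	if (bl->flags & IOBL_INC)
 *		...
 */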

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	atomic_t refs;
};
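
/*
 * Illustrative sketch, not part of this header: per the comment above,
 * ->buf_nr_pages discriminates the union, so a reader holding a bl picks
 * the live member with something like:
 *
 *	if (bl->buf_nr_pages)
 *		ring = bl->buf_ring;
 *	else
 *		buf = list_first_entry(&bl->buf_list,
 *				       struct io_buffer, list);
 */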

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
};
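
/*
 * Illustrative sketch, not part of this header: a plausible caller of
 * io_buffers_select() starts from a small caller-owned iovec array and
 * lets the helper allocate a bigger one if needed (KBUF_MODE_EXPAND),
 * freeing the previous one when it does expand (KBUF_MODE_FREE):
 *
 *	struct iovec iovs[8];
 *	struct buf_sel_arg arg = {
 *		.iovs		= iovs,
 *		.nr_iovs	= ARRAY_SIZE(iovs),
 *		.max_len	= INT_MAX,
 *		.mode		= KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *	int ret = io_buffers_select(req, &arg, issue_flags);
 */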

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
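
/*
 * Illustrative sketch, not part of this header: assuming nr_entries is a
 * power of two with mask == nr_entries - 1, the head index wraps without
 * a modulo, as in io_kbuf_commit() below:
 *
 *	struct io_uring_buf *buf;
 *
 *	buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
 */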

static inline bool io_kbuf_commit(struct io_kiocb *req,
				  struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;

	if (bl->flags & IOBL_INC) {
		struct io_uring_buf *buf;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		if (WARN_ON_ONCE(len > buf->len))
			len = buf->len;
		buf->len -= len;
		if (buf->len) {
			buf->addr += len;
			return false;
		}
	}

	bl->head += nr;
	return true;
}
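
/*
 * Worked example, illustrative only: with IOBL_INC set, committing
 * len == 1000 against a 4096 byte ring entry shrinks it in place
 * (buf->addr += 1000, buf->len becomes 3096) and returns false, so the
 * entry stays at the ring head and the CQE gains IORING_CQE_F_BUF_MORE
 * in __io_put_kbufs() below. bl->head only advances once the entry is
 * fully consumed, or when IOBL_INC is not set.
 */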

static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, len, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}

static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
					  int nbufs, unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING) {
		if (!__io_put_kbuf_ring(req, len, nbufs))
			ret |= IORING_CQE_F_BUF_MORE;
	} else {
		__io_put_kbuf(req, len, issue_flags);
	}
	return ret;
}
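
/*
 * Illustrative sketch, not part of this header: the value returned above
 * is destined for cqe->flags, so userspace recovers the buffer id as:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */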

static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, len, 1, issue_flags);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	return __io_put_kbufs(req, len, nbufs, issue_flags);
}
#endif
v6.8
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
	/* bl is visible from an RCU point of view for lookup */
	__u8 is_ready;
};
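
/*
 * Illustrative note, not part of this header: in this version the ring
 * mapped state is tracked in individual __u8 fields rather than the
 * IOBL_* flags word used in the v6.13.7 listing above, so registering a
 * ring that the application mmap()s would roughly mark:
 *
 *	bl->is_mapped = 1;
 *	bl->is_mmap = 1;
 */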

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
			return true;
		}
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif