v6.2 (io_uring/kbuf.h)
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};
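/*
 * Example (editorial addition, not part of the kernel source): the union
 * above is discriminated by ->buf_nr_pages, which doubles as a type tag.
 * A hypothetical predicate making that explicit could look like this:
 */
static inline bool io_buffer_list_is_ring(const struct io_buffer_list *bl)
{
	/* only non-zero once pages have been pinned for a mapped ring */
	return bl->buf_nr_pages != 0;
}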

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}
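/*
 * Worked example (editorial addition): suppose a request took the ring
 * entry at bl->head but the head was never committed. On recycle:
 *
 *   normal case:       REQ_F_BUFFER_RING is cleared and head is left
 *                      alone, so the same entry is handed out to the
 *                      next request that selects from this group;
 *   REQ_F_PARTIAL_IO:  head++ commits the entry, so the partially
 *                      used buffer stays with this request alone.
 */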

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif
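The IORING_CQE_F_BUFFER | (bid << IORING_CQE_BUFFER_SHIFT) value built by __io_put_kbuf_list() is what userspace eventually sees in cqe->flags. A minimal consumer-side sketch, assuming liburing (handle_cqe is a hypothetical name):

#include <liburing.h>
#include <stdio.h>

/* Decode which provided buffer, if any, completed this CQE. */
static void handle_cqe(struct io_uring_cqe *cqe)
{
	if (cqe->flags & IORING_CQE_F_BUFFER) {
		/* the upper bits of cqe->flags carry the buffer ID */
		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

		printf("res=%d, completed with buffer id %u\n", cqe->res, bid);
	} else {
		printf("res=%d, no buffer selected\n", cqe->res);
	}
}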
v6.9.4 (io_uring/kbuf.h)
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_buf_ring;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
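/*
 * Example (editorial addition, not part of the kernel source): compared
 * with v6.2, ring state is now tagged explicitly instead of being
 * inferred from ->buf_nr_pages, and the list is reference-counted via
 * ->refs so io_pbuf_get_bl() lookups (declared below) can pair with
 * io_put_bl(). A hypothetical predicate matching the v6.2 example:
 */
static inline bool io_buffer_list_is_ring(const struct io_buffer_list *bl)
{
	/* covers both user-registered ring memory and kernel-allocated
	 * memory that the application mmap()s (is_mmap) */
	return bl->is_buf_ring != 0;
}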

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~REQ_F_BUFFER_RING;
		return true;
	}
	return false;
}
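/*
 * Note (editorial addition): unlike v6.2, the partial-IO case is no
 * longer special-cased here. Opcode handlers that must not recycle a
 * partially used buffer set REQ_F_BL_NO_RECYCLE instead, which
 * io_kbuf_recycle() below checks before either recycle path runs.
 */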

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

static inline void __io_put_kbuf_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	unsigned int ret;

	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
	return ret;
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
#endif
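For context, io_register_pbuf_ring() above is the kernel side of the IORING_REGISTER_PBUF_RING registration. A minimal userspace sketch, assuming liburing 2.4+ buffer-ring helpers (BGID, NR_BUFS, BUF_SIZE and setup_pbuf_ring are illustrative choices, not part of the API):

#include <liburing.h>
#include <stddef.h>

#define BGID     7	/* arbitrary buffer group ID for this example */
#define NR_BUFS  8	/* ring entries; must be a power of two */
#define BUF_SIZE 4096

/* Register a provided-buffer ring and publish NR_BUFS buffers in it. */
static struct io_uring_buf_ring *setup_pbuf_ring(struct io_uring *ring,
						 char bufs[NR_BUFS][BUF_SIZE])
{
	struct io_uring_buf_ring *br;
	int i, err;

	/* liburing allocates and registers the ring memory for us */
	br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &err);
	if (!br)
		return NULL;

	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);

	/* make all entries visible to the kernel in one update */
	io_uring_buf_ring_advance(br, NR_BUFS);
	return br;
}

Requests that want these buffers then set IOSQE_BUFFER_SELECT and sqe->buf_group = BGID; the completion carries the chosen buffer ID in cqe->flags, as shown in the v6.2 example above.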