// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* ring mapped provided buffers */
	__u8 is_mapped;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
	/* bl is visible from an RCU point of view for lookup */
	__u8 is_ready;
};
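
/*
 * Illustrative sketch, not part of the kernel sources: the ring fields
 * above are used power-of-2 style, i.e. ->mask == ->nr_entries - 1, and
 * the current slot is found by masking the free-running ->head. The
 * helper name is hypothetical, and it assumes the ring is virtually
 * contiguous, as it is when viewed from userspace.
 */
static inline struct io_uring_buf *io_bl_cur_buf_sketch(struct io_buffer_list *bl)
{
	/* the mask folds the free-running head index into the ring */
	return &bl->buf_ring->bufs[bl->head & bl->mask];
}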

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
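
/*
 * Userspace-side sketch of what io_provide_buffers() handles, for
 * exposition only (it would not live in this kernel header): filling an
 * SQE the way liburing's io_uring_prep_provide_buffers() does. All
 * parameter values are example assumptions.
 */
static inline void sqe_provide_buffers_sketch(struct io_uring_sqe *sqe,
					      void *addr, int buf_len,
					      int nbufs, int bgid, int bid)
{
	*sqe = (struct io_uring_sqe) {
		.opcode		= IORING_OP_PROVIDE_BUFFERS,
		.fd		= nbufs,		/* number of buffers */
		.addr		= (unsigned long) addr,	/* start of the buffer range */
		.len		= buf_len,		/* size of each buffer */
		.off		= bid,			/* first buffer ID to assign */
		.buf_group	= bgid,			/* group to add the buffers to */
	};
}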

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
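
/*
 * Userspace-side sketch of the registration these handlers service, for
 * exposition only: pointing IORING_REGISTER_PBUF_RING at caller-allocated,
 * page-aligned ring memory via io_uring_register(2). `ring_fd` and `br`
 * are assumed to exist; error handling and the <sys/syscall.h> include
 * this would need are elided.
 */
static inline int register_pbuf_ring_sketch(int ring_fd,
					    struct io_uring_buf_ring *br,
					    unsigned int entries, int bgid)
{
	struct io_uring_buf_reg reg = {
		.ring_addr	= (unsigned long) br,	/* must be page aligned */
		.ring_entries	= entries,		/* must be a power of 2 */
		.bgid		= bgid,
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PBUF_RING, &reg, 1);
}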

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
			return true;
		}
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
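
/*
 * Illustrative pairing with io_buffer_select(), mirroring how opcode
 * handlers use this predicate; the helper is hypothetical and error
 * handling is elided.
 */
static inline void __user *io_pick_buf_sketch(struct io_kiocb *req, size_t *len,
					      unsigned int issue_flags)
{
	if (!io_do_buffer_select(req))
		return NULL;	/* no selection wanted, or buffer already picked */
	return io_buffer_select(req, len, issue_flags);
}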

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
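
/*
 * Illustrative call site, not from the kernel sources: a handler that
 * cannot make progress hands its selected buffer back before asking to
 * be re-issued, so the buffer can be served again on retry. The function
 * name is hypothetical.
 */
static inline int io_handler_retry_sketch(struct io_kiocb *req,
					  unsigned int issue_flags)
{
	/* nothing was consumed; make the buffer selectable again */
	io_kbuf_recycle(req, issue_flags);
	return -EAGAIN;
}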

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}
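
/*
 * Userspace-side sketch, for exposition only: the value built above ends
 * up in cqe->flags, so an application recovers the buffer ID with the
 * inverse of that encoding.
 */
static inline int cqe_buffer_id_sketch(const struct io_uring_cqe *cqe)
{
	if (!(cqe->flags & IORING_CQE_F_BUFFER))
		return -1;	/* no provided buffer attached to this CQE */
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}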

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif