// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
					unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel it by calling ->uring_cmd() with IO_URING_F_CANCEL
 * in issue_flags.
 *
 * The command is guaranteed not to be done when ->uring_cmd() is called
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
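
/*
 * Illustrative sketch, not part of this file: how a hypothetical driver
 * might pair io_uring_cmd_mark_cancelable() with an IO_URING_F_CANCEL
 * check in its ->uring_cmd() handler. foo_cmd_start() and foo_cmd_abort()
 * are made-up driver internals, named here only for illustration:
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *cmd,
 *				 unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// io_uring is canceling; resolving the race with
 *			// normal completion is the driver's job
 *			foo_cmd_abort(cmd);
 *			return 0;
 *		}
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		foo_cmd_start(cmd);
 *		return -EIOCBQUEUED;
 *	}
 */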

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
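
/*
 * Illustrative sketch, not part of this file: a completion arriving in
 * IRQ context cannot call io_uring_cmd_done() directly, so it bounces
 * the work to task context. This assumes the io_uring_cmd_complete_in_task()
 * wrapper from <linux/io_uring/cmd.h>; foo_irq() and foo_cmd_result()
 * are made up:
 *
 *	static void foo_cmd_tw_cb(struct io_uring_cmd *cmd,
 *				  unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, foo_cmd_result(cmd), 0, issue_flags);
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = data;
 *
 *		io_uring_cmd_complete_in_task(cmd, foo_cmd_tw_cb);
 *		return IRQ_HANDLED;
 *	}
 */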

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd that originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
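
/*
 * Illustrative note, not from this file: on rings created with
 * IORING_SETUP_CQE32, the res2 value above surfaces as the second
 * 64-bit result word of the 32-byte CQE (big_cqe.extra1); on normal
 * rings it is dropped. A passthrough driver might complete with:
 *
 *	// status becomes cqe->res, result64 lands in the big CQE
 *	io_uring_cmd_done(cmd, status, result64, issue_flags);
 */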

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}
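
/*
 * Note (an assumption based on the core's usual flow, not stated in this
 * file): besides the -EAGAIN fallback in io_uring_cmd() below, this can
 * also run via the opcode's ->prep_async() hook when a request is punted
 * before issue, so ->sqe points at stable memory once the command runs
 * from io-wq.
 */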

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}
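
/*
 * Illustrative sketch, not part of this file: the userspace side of
 * IORING_URING_CMD_FIXED. The buffer must have been registered up front
 * with io_uring_register_buffers(); buf_index picks which registered
 * buffer to use, and MY_DRIVER_CMD is a made-up driver opcode:
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = devfd;
 *	sqe->cmd_op = MY_DRIVER_CMD;
 *	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
 *	sqe->buf_index = 0;	// first registered buffer
 */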

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
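	/*
	 * On -EAGAIN the core retries the request later (e.g. from io-wq),
	 * by which time the SQE in the SQ ring may have been reused, so
	 * stash a stable copy in the request's async data first.
	 */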
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
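
/*
 * Illustrative sketch, not part of this file: a driver resolving an
 * IORING_URING_CMD_FIXED buffer into an iov_iter before mapping it for
 * I/O. ubuf and len are assumed to come from the driver-private command
 * payload; the direction follows the data flow of the command:
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(ubuf, len, WRITE, &iter, cmd);
 *	if (ret)
 *		return ret;
 *	// iter now walks the pages of the pre-registered buffer
 */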

#if defined(CONFIG_NET)
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
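
/*
 * Illustrative sketch, not part of this file: issuing
 * SOCKET_URING_OP_SIOCINQ from userspace. Raw SQE fields are set to
 * mirror what io_uring_cmd_sock() reads above; ring setup and error
 * handling are omitted:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = sockfd;
 *	sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;
 *	io_uring_submit(&ring);
 *	// on completion, cqe->res holds the count of unread bytes
 */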