// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static struct io_uring_cmd_data *io_uring_async_get(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cmd_data *cache;

	cache = io_alloc_cache_get(&ctx->uring_cache);
	if (cache) {
		cache->op_data = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = cache;
		return cache;
	}
	if (!io_alloc_async_data(req)) {
		cache = req->async_data;
		cache->op_data = NULL;
		return cache;
	}
	return NULL;
}

static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache = req->async_data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
				  hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
					unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by calling ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when ->uring_cmd() is called
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
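
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver's ->uring_cmd() handler that registers for cancelation and
 * honors IO_URING_F_CANCEL. my_abort_io()/my_start_io() are made-up
 * names; a real driver must synchronize its cancel path against its
 * normal completion path, as noted above.
 *
 *	static int my_uring_cmd(struct io_uring_cmd *cmd,
 *				unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			my_abort_io(cmd);
 *			io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
 *			return 0;
 *		}
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		my_start_io(cmd);	// completes via io_uring_cmd_done()
 *		return -EIOCBQUEUED;
 *	}
 */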

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (current->flags & (PF_EXITING | PF_KTHREAD))
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
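
/*
 * Example (illustrative sketch): drivers typically reach this through the
 * io_uring_cmd_complete_in_task() wrapper to bounce completion handling
 * out of hard-irq context into task context, where io_uring_cmd_done()
 * may be called. my_irq_handler()/my_cmd_result() are hypothetical.
 *
 *	static void my_cmd_tw_cb(struct io_uring_cmd *cmd,
 *				 unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, my_cmd_result(cmd), 0, issue_flags);
 *	}
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = data;
 *
 *		io_uring_cmd_complete_in_task(cmd, my_cmd_tw_cb);
 *		return IRQ_HANDLED;
 *	}
 */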

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
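
/*
 * Example (illustrative sketch): on rings created with IORING_SETUP_CQE32,
 * res2 above is surfaced to userspace in the first extra field of the
 * 32-byte CQE (big_cqe.extra1); on regular rings it is dropped. A driver
 * completion helper might look like this (my_complete() is hypothetical):
 *
 *	static void my_complete(struct io_uring_cmd *cmd, int err,
 *				u64 result, unsigned int issue_flags)
 *	{
 *		// err < 0 marks the request failed; result becomes
 *		// CQE extra1 when CQE32 is enabled.
 *		io_uring_cmd_done(cmd, err, result, issue_flags);
 *	}
 */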

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache;

	cache = io_uring_async_get(req);
	if (unlikely(!cache))
		return -ENOMEM;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * SQE data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data at prep time, play it safe
	 * and copy the whole SQE here. This can later get relaxed.
	 */
	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = cache->sqes;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_rsrc_node *node;
		u16 index = READ_ONCE(sqe->buf_index);

		node = io_rsrc_node_lookup(&ctx->buf_table, index);
		if (unlikely(!node))
			return -EFAULT;
		/*
		 * Pin the node upfront, prior to io_uring_cmd_import_fixed()
		 * being called. This prevents destruction of the mapped buffer
		 * we'll need at actual import time.
		 */
		io_req_assign_buf_node(req, node);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}
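
/*
 * Example (illustrative sketch): after prep, ioucmd->sqe points at a stable
 * copy of the SQE, so a driver's ->uring_cmd() may read its private payload
 * through io_uring_sqe_cmd() even after the request has gone async. The
 * struct my_cmd_payload layout is hypothetical:
 *
 *	const struct my_cmd_payload *p = io_uring_sqe_cmd(ioucmd->sqe);
 *
 * On rings created with IORING_SETUP_SQE128 (IO_URING_F_SQE128 set in
 * issue_flags), the payload area grows from 16 to 80 bytes.
 */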

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
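
/*
 * Example (illustrative sketch): how a driver opts into uring_cmd. Only
 * ->uring_cmd is required; ->uring_cmd_iopoll is additionally needed for
 * rings created with IORING_SETUP_IOPOLL, as checked above. The my_*
 * handlers are hypothetical.
 *
 *	static const struct file_operations my_fops = {
 *		.owner			= THIS_MODULE,
 *		.uring_cmd		= my_uring_cmd,
 *		.uring_cmd_iopoll	= my_uring_cmd_iopoll,
 *	};
 */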

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_rsrc_node *node = req->buf_node;

	/* Must have had rsrc_node assigned at prep time */
	if (node)
		return io_import_fixed(rw, iter, node->buf, ubuf, len);

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
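
/*
 * Example (illustrative sketch): a driver importing a registered buffer
 * before starting the transfer. The request must have been submitted with
 * IORING_URING_CMD_FIXED so that prep assigned the rsrc node; ubuf_addr
 * and len here are hypothetical values taken from the command payload.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(ubuf_addr, len, ITER_SOURCE,
 *					&iter, ioucmd);
 *	if (ret)
 *		return ret;
 *	// iter now describes the pinned pages of the registered buffer
 */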

void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

#if defined(CONFIG_NET)
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
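
/*
 * Example (illustrative sketch): from userspace, SOCKET_URING_OP_SIOCINQ
 * can be issued with liburing roughly as follows; the CQE result is the
 * number of queued bytes, or a negative errno:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd,
 *			       0, 0, NULL, 0);
 *	io_uring_submit(&ring);
 */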