io_uring/uring_cmd.c, v6.2
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	ioucmd->task_work_cb(ioucmd);
}

void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	else
		io_req_complete_post(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!req->file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
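
To see how the v6.2 API above is consumed, here is a minimal driver-side sketch. It is illustrative only: demo_uring_cmd(), demo_cmd_work(), and DEMO_OP_NOP are invented names, and a real driver would wire demo_uring_cmd() up as its file_operations->uring_cmd handler. Only io_uring_cmd_complete_in_task() and io_uring_cmd_done() come from the listing.

/* Hypothetical consumer of the v6.2 uring_cmd API (illustrative sketch). */
#include <linux/io_uring.h>

#define DEMO_OP_NOP	0	/* made-up cmd_op value for this sketch */

/* Runs in task context via io_uring's task_work machinery. */
static void demo_cmd_work(struct io_uring_cmd *ioucmd)
{
	/* ret becomes cqe->res; res2 fills extra1 on CQE32 rings. */
	io_uring_cmd_done(ioucmd, 0, 0);
}

static int demo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	switch (ioucmd->cmd_op) {
	case DEMO_OP_NOP:
		/*
		 * Pretend the completion arrived from IRQ context: bounce
		 * the CQE posting to task work and tell io_uring the command
		 * will be finished later via io_uring_cmd_done().
		 */
		io_uring_cmd_complete_in_task(ioucmd, demo_cmd_work);
		return -EIOCBQUEUED;
	default:
		return -EOPNOTSUPP;
	}
}

Returning -EIOCBQUEUED is what arms the "complete later" path in io_uring_cmd() above; any other return value is posted as the result immediately.
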
io_uring/uring_cmd.c, v6.8
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by calling ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else {
		struct io_tw_state ts = {
			.locked = !(issue_flags & IO_URING_F_UNLOCKED),
		};
		io_req_task_complete(req, &ts);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->sqe = sqe;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
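
The v6.8 listing adds two driver-visible pieces: the cancelable-command hooks and the socket command dispatcher. The following is a minimal sketch of how a driver might use the cancelable API. The names demo_cancelable_cmd(), demo_hw_submit(), and demo_hw_try_abort() are invented; the contract is only what the comment above io_uring_cmd_mark_cancelable() states, namely that a marked command gets a second ->uring_cmd() call with IO_URING_F_CANCEL and the driver must resolve the cancel/complete race itself.

/* Hypothetical driver-side use of the v6.8 cancelable API (sketch only). */
#include <linux/io_uring/cmd.h>

/* Invented stand-ins for a real driver's hardware submit/abort paths. */
static int demo_hw_submit(struct io_uring_cmd *cmd);
static bool demo_hw_try_abort(struct io_uring_cmd *cmd);

static int demo_cancelable_cmd(struct io_uring_cmd *cmd,
			       unsigned int issue_flags)
{
	int ret;

	if (issue_flags & IO_URING_F_CANCEL) {
		/*
		 * Re-issued by io_uring_try_cancel_uring_cmd(). The command
		 * is not done yet, but it may still complete normally at any
		 * moment; only finish it here if the abort actually won.
		 */
		if (demo_hw_try_abort(cmd))
			io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}

	ret = demo_hw_submit(cmd);
	if (ret)
		return ret;	/* completes inline with an error */

	/*
	 * Queued to hardware: make the request discoverable by cancellation.
	 * io_uring_cmd_done() later drops it from the cancelable list via
	 * io_uring_cmd_del_cancelable().
	 */
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	return -EIOCBQUEUED;
}

For io_uring_cmd_sock(), the consumer is userspace: an IORING_OP_URING_CMD SQE against a socket fd, with sqe->cmd_op selecting the operation. A sketch using liburing follows, assuming a liburing and uapi header new enough to carry the SOCKET_URING_OP_* values; demo_siocinq() is an invented helper.

/* Userspace sketch: query unread bytes on a socket via io_uring. */
#include <liburing.h>

static int demo_siocinq(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, sockfd, NULL, 0, 0);
	sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;	/* >= 0: bytes queued for reading; < 0: -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}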