v6.2: io_uring/cancel.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)

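/*
 * io-wq match callback: 'data' is the io_cancel_data describing what to
 * cancel. Requests match on ring, then on fd or user_data depending on
 * the flags; for ALL/ANY the sequence check keeps one cancel pass from
 * matching the same request twice.
 */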
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	if (req->ctx != cd->ctx)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
		;
	} else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	} else {
		if (req->cqe.user_data != cd->data)
			return false;
	}
	if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
		if (cd->seq == req->work.cancel_seq)
			return false;
		req->work.cancel_seq = cd->seq;
	}
	return true;
}

static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

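/*
 * Try each cancellation path in turn: io-wq, then armed poll requests,
 * then timeouts (unless matching by fd).
 */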
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll armed
	 * that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

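/* Parse and validate an IORING_OP_ASYNC_CANCEL SQE. */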
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}

	return 0;
}

static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

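/*
 * Issue handler for IORING_OP_ASYNC_CANCEL: resolve the target file when
 * matching by fd, then run the cancellation.
 */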
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

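From userspace, this opcode is driven by submitting an IORING_OP_ASYNC_CANCEL
SQE. A minimal sketch using liburing's io_uring_prep_cancel64() and
io_uring_prep_cancel_fd() helpers (assumed available in the linked liburing;
error handling mostly elided):

	/*
	 * Illustrative userspace sketch, not kernel code: cancel one request
	 * by its user_data, then cancel everything targeting a given fd.
	 */
	#include <liburing.h>

	static int cancel_examples(struct io_uring *ring, __u64 target, int fd)
	{
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		/* match by user_data, the default criteria in io_cancel_cb() */
		sqe = io_uring_get_sqe(ring);
		io_uring_prep_cancel64(sqe, target, 0);

		/* match every request on 'fd'; _ALL keeps going past the
		 * first match instead of stopping there */
		sqe = io_uring_get_sqe(ring);
		io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);

		ret = io_uring_submit(ring);
		if (ret < 0)
			return ret;

		/* each cancel posts its own CQE; drain them */
		while (io_uring_peek_cqe(ring, &cqe) == 0)
			io_uring_cqe_seen(ring, cqe);
		return 0;
	}

Per __io_async_cancel() above, the cancel request's own CQE result is the
number of requests cancelled when IORING_ASYNC_CANCEL_ALL (or _ANY) is set,
and otherwise 0, -ENOENT, or -EALREADY.
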
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		unsigned long file_ptr;

		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
		cd->file = (struct file *) (file_ptr & FFS_MASK);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

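/*
 * Synchronous cancel, reached via io_uring_register(2) with
 * IORING_REGISTER_SYNC_CANCEL: retries until the match completes, the
 * timeout expires, or a signal interrupts the wait.
 */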
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct fd f = { };
	DEFINE_WAIT(wait);
	int ret;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
		return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		f = fdget(sc.fd);
		if (!f.file)
			return -EBADF;
		cd.file = f.file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancellation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	fdput(f);
	return ret;
}

v6.9.4: io_uring/cancel.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}

static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

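/*
 * Try each cancellation path in turn: io-wq, armed poll, waitid and
 * futex requests, then timeouts (unless matching by fd).
 */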
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll armed
	 * that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}

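io_async_cancel_prep() above reads the match key from sqe->addr, the flags
from sqe->cancel_flags and, for IORING_ASYNC_CANCEL_OP, the target opcode
from sqe->len. A hedged raw-SQE sketch of the newer flags, with 'sqe'
assumed to point at a free submission-queue entry:

	/* Illustrative userspace sketch, not kernel code: build an SQE that
	 * cancels every pending IORING_OP_POLL_ADD on the ring. */
	#include <string.h>
	#include <linux/io_uring.h>

	static void prep_cancel_all_polls(struct io_uring_sqe *sqe)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_ASYNC_CANCEL;
		/* match on request opcode rather than user_data... */
		sqe->cancel_flags = IORING_ASYNC_CANCEL_OP |
				    IORING_ASYNC_CANCEL_ALL;
		/* ...with the target opcode carried in sqe->len */
		sqe->len = IORING_OP_POLL_ADD;
		/* sqe->addr is only consulted if IORING_ASYNC_CANCEL_USERDATA
		 * is also set, per io_cancel_req_match() */
	}
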
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancellation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}
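
io_sync_cancel() is the handler for io_uring_register(2) with
IORING_REGISTER_SYNC_CANCEL. A minimal sketch using liburing's
io_uring_register_sync_cancel() wrapper (assumed available; note that a
timeout of {-1, -1} means "no deadline", per the tv_sec/tv_nsec check above):

	/* Illustrative userspace sketch, not kernel code: synchronously
	 * cancel all requests targeting 'fd', waiting up to one second. */
	#include <string.h>
	#include <liburing.h>

	static int sync_cancel_fd(struct io_uring *ring, int fd)
	{
		struct io_uring_sync_cancel_reg reg;

		memset(&reg, 0, sizeof(reg));
		reg.fd = fd;
		reg.flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;
		reg.timeout.tv_sec = 1;		/* relative 1s deadline */
		reg.timeout.tv_nsec = 0;

		/* 0 on success; -ETIME if the deadline passed first */
		return io_uring_register_sync_cancel(ring, &reg);
	}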