// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

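/*
 * Illustrative sketch (not part of this file): from userspace, an async
 * cancel is typically issued through liburing, assuming its prep helpers:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel64(sqe, target_user_data, IORING_ASYNC_CANCEL_ALL);
 *	io_uring_submit(&ring);
 *
 * The resulting CQE then carries the outcome computed below: a match
 * count for ALL/ANY cancels, 0 for a single successful match, or a
 * negative errno such as -ENOENT or -EALREADY.
 */
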
/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}

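/* io-wq match callback: adapts io_wq_work to io_cancel_req_match() */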
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}

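/*
 * Try to cancel within a single task's io-wq, mapping the io-wq result
 * onto errno conventions: 0 if the work was canceled before it ran,
 * -EALREADY if it is already executing, -ENOENT if nothing matched.
 */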
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

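/*
 * Walk the places a matching request may live, in order: the io-wq of
 * the given task context, then the poll, waitid and futex caches, and
 * finally the timeout list (skipped when canceling by fd, since
 * timeouts have no file attached).
 */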
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have a poll armed
	 * that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

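/*
 * Prep for IORING_OP_ASYNC_CANCEL: read the match key (user_data, flags
 * and optionally fd/opcode) from the SQE, rejecting unknown flags and
 * the ANY+FD / ANY+OP combinations, which contradict each other.
 */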
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}

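/*
 * Core cancel path: keep trying the issuing task's context first
 * (looping while ALL/ANY is set), then fall back to every io-wq
 * attached to the ring. Returns the match count for ALL/ANY cancels,
 * otherwise the result of the last attempt.
 */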
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

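/*
 * Issue side of IORING_OP_ASYNC_CANCEL. If the cancel is keyed on an
 * fd, resolve the target file first (fixed or normal table).
 */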
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->tctx;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

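/*
 * One synchronous cancel attempt. A fixed file must be re-resolved on
 * every attempt, since the uring_lock is dropped between attempts and
 * the file table may have changed under us.
 */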
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		struct io_rsrc_node *node;

		node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
		if (unlikely(!node))
			return -EBADF;
		cd->file = io_slot_file(node);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

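/*
 * Synchronous cancel, entered via io_uring_register(2) with
 * IORING_REGISTER_SYNC_CANCEL. Unlike IORING_OP_ASYNC_CANCEL it blocks,
 * retrying each time a completion wakes ctx->cq_wait, until the match
 * set is drained, the (optional) timeout expires, or a signal arrives.
 *
 * Illustrative userspace sketch (not part of this file), assuming
 * liburing's wrapper; tv_sec/tv_nsec of -1 means "no timeout":
 *
 *	struct io_uring_sync_cancel_reg reg = {
 *		.addr		= target_user_data,
 *		.flags		= IORING_ASYNC_CANCEL_ALL,
 *		.timeout	= { .tv_sec = -1, .tv_nsec = -1 },
 *	};
 *
 *	ret = io_uring_register_sync_cancel(&ring, &reg);
 */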
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time a
	 * request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}