Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Functions related to generic timeout handling of requests.
4 */
5#include <linux/kernel.h>
6#include <linux/module.h>
7#include <linux/blkdev.h>
8#include <linux/fault-inject.h>
9
10#include "blk.h"
11#include "blk-mq.h"
12
13#ifdef CONFIG_FAIL_IO_TIMEOUT
14
15static DECLARE_FAULT_ATTR(fail_io_timeout);
16
17static int __init setup_fail_io_timeout(char *str)
18{
19 return setup_fault_attr(&fail_io_timeout, str);
20}
21__setup("fail_io_timeout=", setup_fail_io_timeout);
22
23bool __blk_should_fake_timeout(struct request_queue *q)
24{
25 return should_fail(&fail_io_timeout, 1);
26}
27EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
28
29static int __init fail_io_timeout_debugfs(void)
30{
31 struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
32 NULL, &fail_io_timeout);
33
34 return PTR_ERR_OR_ZERO(dir);
35}
36
37late_initcall(fail_io_timeout_debugfs);
38
39ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
40 char *buf)
41{
42 struct gendisk *disk = dev_to_disk(dev);
43 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
44
45 return sprintf(buf, "%d\n", set != 0);
46}
47
48ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
49 const char *buf, size_t count)
50{
51 struct gendisk *disk = dev_to_disk(dev);
52 int val;
53
54 if (count) {
55 struct request_queue *q = disk->queue;
56 char *p = (char *) buf;
57
58 val = simple_strtoul(p, &p, 10);
59 if (val)
60 blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
61 else
62 blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
63 }
64
65 return count;
66}
67
68#endif /* CONFIG_FAIL_IO_TIMEOUT */
69
70/**
71 * blk_abort_request - Request recovery for the specified command
72 * @req: pointer to the request of interest
73 *
74 * This function requests that the block layer start recovery for the
75 * request by deleting the timer and calling the q's timeout function.
76 * LLDDs who implement their own error recovery MAY ignore the timeout
77 * event if they generated blk_abort_request.
78 */
79void blk_abort_request(struct request *req)
80{
81 /*
82 * All we need to ensure is that timeout scan takes place
83 * immediately and that scan sees the new timeout value.
84 * No need for fancy synchronizations.
85 */
86 WRITE_ONCE(req->deadline, jiffies);
87 kblockd_schedule_work(&req->q->timeout_work);
88}
89EXPORT_SYMBOL_GPL(blk_abort_request);
90
91static unsigned long blk_timeout_mask __read_mostly;
92
93static int __init blk_timeout_init(void)
94{
95 blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
96 return 0;
97}
98
99late_initcall(blk_timeout_init);
100
101/*
102 * Just a rough estimate, we don't care about specific values for timeouts.
103 */
104static inline unsigned long blk_round_jiffies(unsigned long j)
105{
106 return (j + blk_timeout_mask) + 1;
107}
108
109unsigned long blk_rq_timeout(unsigned long timeout)
110{
111 unsigned long maxt;
112
113 maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
114 if (time_after(timeout, maxt))
115 timeout = maxt;
116
117 return timeout;
118}
119
120/**
121 * blk_add_timer - Start timeout timer for a single request
122 * @req: request that is about to start running.
123 *
124 * Notes:
125 * Each request has its own timer, and as it is added to the queue, we
126 * set up the timer. When the request completes, we cancel the timer.
127 */
128void blk_add_timer(struct request *req)
129{
130 struct request_queue *q = req->q;
131 unsigned long expiry;
132
133 /*
134 * Some LLDs, like scsi, peek at the timeout to prevent a
135 * command from being retried forever.
136 */
137 if (!req->timeout)
138 req->timeout = q->rq_timeout;
139
140 req->rq_flags &= ~RQF_TIMED_OUT;
141
142 expiry = jiffies + req->timeout;
143 WRITE_ONCE(req->deadline, expiry);
144
145 /*
146 * If the timer isn't already pending or this timeout is earlier
147 * than an existing one, modify the timer. Round up to next nearest
148 * second.
149 */
150 expiry = blk_rq_timeout(blk_round_jiffies(expiry));
151
152 if (!timer_pending(&q->timeout) ||
153 time_before(expiry, q->timeout.expires)) {
154 unsigned long diff = q->timeout.expires - expiry;
155
156 /*
157 * Due to added timer slack to group timers, the timer
158 * will often be a little in front of what we asked for.
159 * So apply some tolerance here too, otherwise we keep
160 * modifying the timer because expires for value X
161 * will be X + something.
162 */
163 if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
164 mod_timer(&q->timeout, expiry);
165 }
166
167}
1/*
2 * Functions related to generic timeout handling of requests.
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/blkdev.h>
7#include <linux/fault-inject.h>
8
9#include "blk.h"
10#include "blk-mq.h"
11
12#ifdef CONFIG_FAIL_IO_TIMEOUT
13
14static DECLARE_FAULT_ATTR(fail_io_timeout);
15
16static int __init setup_fail_io_timeout(char *str)
17{
18 return setup_fault_attr(&fail_io_timeout, str);
19}
20__setup("fail_io_timeout=", setup_fail_io_timeout);
21
22int blk_should_fake_timeout(struct request_queue *q)
23{
24 if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
25 return 0;
26
27 return should_fail(&fail_io_timeout, 1);
28}
29
30static int __init fail_io_timeout_debugfs(void)
31{
32 struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
33 NULL, &fail_io_timeout);
34
35 return PTR_ERR_OR_ZERO(dir);
36}
37
38late_initcall(fail_io_timeout_debugfs);
39
40ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
41 char *buf)
42{
43 struct gendisk *disk = dev_to_disk(dev);
44 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
45
46 return sprintf(buf, "%d\n", set != 0);
47}
48
49ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
50 const char *buf, size_t count)
51{
52 struct gendisk *disk = dev_to_disk(dev);
53 int val;
54
55 if (count) {
56 struct request_queue *q = disk->queue;
57 char *p = (char *) buf;
58
59 val = simple_strtoul(p, &p, 10);
60 spin_lock_irq(q->queue_lock);
61 if (val)
62 queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
63 else
64 queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
65 spin_unlock_irq(q->queue_lock);
66 }
67
68 return count;
69}
70
71#endif /* CONFIG_FAIL_IO_TIMEOUT */
72
73/*
74 * blk_delete_timer - Delete/cancel timer for a given function.
75 * @req: request that we are canceling timer for
76 *
77 */
78void blk_delete_timer(struct request *req)
79{
80 list_del_init(&req->timeout_list);
81}
82
83static void blk_rq_timed_out(struct request *req)
84{
85 struct request_queue *q = req->q;
86 enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
87
88 if (q->rq_timed_out_fn)
89 ret = q->rq_timed_out_fn(req);
90 switch (ret) {
91 case BLK_EH_HANDLED:
92 /* Can we use req->errors here? */
93 __blk_complete_request(req);
94 break;
95 case BLK_EH_RESET_TIMER:
96 blk_add_timer(req);
97 blk_clear_rq_complete(req);
98 break;
99 case BLK_EH_NOT_HANDLED:
100 /*
101 * LLD handles this for now but in the future
102 * we can send a request msg to abort the command
103 * and we can move more of the generic scsi eh code to
104 * the blk layer.
105 */
106 break;
107 default:
108 printk(KERN_ERR "block: bad eh return: %d\n", ret);
109 break;
110 }
111}
112
113static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
114 unsigned int *next_set)
115{
116 if (time_after_eq(jiffies, rq->deadline)) {
117 list_del_init(&rq->timeout_list);
118
119 /*
120 * Check if we raced with end io completion
121 */
122 if (!blk_mark_rq_complete(rq))
123 blk_rq_timed_out(rq);
124 } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
125 *next_timeout = rq->deadline;
126 *next_set = 1;
127 }
128}
129
130void blk_timeout_work(struct work_struct *work)
131{
132 struct request_queue *q =
133 container_of(work, struct request_queue, timeout_work);
134 unsigned long flags, next = 0;
135 struct request *rq, *tmp;
136 int next_set = 0;
137
138 if (blk_queue_enter(q, true))
139 return;
140 spin_lock_irqsave(q->queue_lock, flags);
141
142 list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
143 blk_rq_check_expired(rq, &next, &next_set);
144
145 if (next_set)
146 mod_timer(&q->timeout, round_jiffies_up(next));
147
148 spin_unlock_irqrestore(q->queue_lock, flags);
149 blk_queue_exit(q);
150}
151
152/**
153 * blk_abort_request -- Request request recovery for the specified command
154 * @req: pointer to the request of interest
155 *
156 * This function requests that the block layer start recovery for the
157 * request by deleting the timer and calling the q's timeout function.
158 * LLDDs who implement their own error recovery MAY ignore the timeout
159 * event if they generated blk_abort_req. Must hold queue lock.
160 */
161void blk_abort_request(struct request *req)
162{
163 if (blk_mark_rq_complete(req))
164 return;
165
166 if (req->q->mq_ops) {
167 blk_mq_rq_timed_out(req, false);
168 } else {
169 blk_delete_timer(req);
170 blk_rq_timed_out(req);
171 }
172}
173EXPORT_SYMBOL_GPL(blk_abort_request);
174
175unsigned long blk_rq_timeout(unsigned long timeout)
176{
177 unsigned long maxt;
178
179 maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
180 if (time_after(timeout, maxt))
181 timeout = maxt;
182
183 return timeout;
184}
185
186/**
187 * blk_add_timer - Start timeout timer for a single request
188 * @req: request that is about to start running.
189 *
190 * Notes:
191 * Each request has its own timer, and as it is added to the queue, we
192 * set up the timer. When the request completes, we cancel the timer.
193 * Queue lock must be held for the non-mq case, mq case doesn't care.
194 */
195void blk_add_timer(struct request *req)
196{
197 struct request_queue *q = req->q;
198 unsigned long expiry;
199
200 /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
201 if (!q->mq_ops && !q->rq_timed_out_fn)
202 return;
203
204 BUG_ON(!list_empty(&req->timeout_list));
205
206 /*
207 * Some LLDs, like scsi, peek at the timeout to prevent a
208 * command from being retried forever.
209 */
210 if (!req->timeout)
211 req->timeout = q->rq_timeout;
212
213 req->deadline = jiffies + req->timeout;
214
215 /*
216 * Only the non-mq case needs to add the request to a protected list.
217 * For the mq case we simply scan the tag map.
218 */
219 if (!q->mq_ops)
220 list_add_tail(&req->timeout_list, &req->q->timeout_list);
221
222 /*
223 * If the timer isn't already pending or this timeout is earlier
224 * than an existing one, modify the timer. Round up to next nearest
225 * second.
226 */
227 expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
228
229 if (!timer_pending(&q->timeout) ||
230 time_before(expiry, q->timeout.expires)) {
231 unsigned long diff = q->timeout.expires - expiry;
232
233 /*
234 * Due to added timer slack to group timers, the timer
235 * will often be a little in front of what we asked for.
236 * So apply some tolerance here too, otherwise we keep
237 * modifying the timer because expires for value X
238 * will be X + something.
239 */
240 if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
241 mod_timer(&q->timeout, expiry);
242 }
243
244}