1/*
2 * Functions to sequence FLUSH and FUA writes.
3 *
4 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
5 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
 9 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
10 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
11 * properties and hardware capability.
12 *
13 * If a request doesn't have data, only REQ_FLUSH makes sense, which
14 * indicates a simple flush request. If there is data, REQ_FLUSH indicates
15 * that the device cache should be flushed before the data is executed, and
16 * REQ_FUA means that the data must be on non-volatile media on request
17 * completion.
18 *
 19 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 20 * any difference. The requests are either completed immediately if there's
 21 * no data or executed as normal requests otherwise.
 22 *
 23 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 24 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 25 *
 26 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 27 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
28 *
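 * As an illustration of the above rules: a REQ_FLUSH|REQ_FUA write with
 * data is sequenced as PREFLUSH -> DATA -> POSTFLUSH on a device with a
 * writeback cache but no FUA, as PREFLUSH -> DATA (REQ_FUA retained) on a
 * FUA-capable device, and is executed as a plain write when the device has
 * no writeback cache at all.
 *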
29 * The actual execution of flush is double buffered. Whenever a request
30 * needs to execute PRE or POSTFLUSH, it queues at
31 * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
32 * flush is issued and the pending_idx is toggled. When the flush
 33 * completes, all the requests which were pending proceed to the next
34 * step. This allows arbitrary merging of different types of FLUSH/FUA
35 * requests.
36 *
37 * Currently, the following conditions are used to determine when to issue
38 * flush.
39 *
40 * C1. At any given time, only one flush shall be in progress. This makes
41 * double buffering sufficient.
42 *
43 * C2. Flush is deferred if any request is executing DATA of its sequence.
44 * This avoids issuing separate POSTFLUSHes for requests which shared
45 * PREFLUSH.
46 *
47 * C3. The second condition is ignored if there is a request which has
48 * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 49 * starvation in the unlikely case where there is a continuous stream of
50 * FUA (without FLUSH) requests.
51 *
52 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
53 * is beneficial.
54 *
55 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
56 * Once while executing DATA and again after the whole sequence is
57 * complete. The first completion updates the contained bio but doesn't
58 * finish it so that the bio submitter is notified only after the whole
59 * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
60 * req_bio_endio().
61 *
62 * The above peculiarity requires that each FLUSH/FUA request has only one
63 * bio attached to it, which is guaranteed as they aren't allowed to be
64 * merged in the usual way.
65 */
66
67#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/bio.h>
70#include <linux/blkdev.h>
71#include <linux/gfp.h>
72#include <linux/blk-mq.h>
73
74#include "blk.h"
75#include "blk-mq.h"
76
77/* FLUSH/FUA sequences */
78enum {
79 REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
80 REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
81 REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
82 REQ_FSEQ_DONE = (1 << 3),
83
84 REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
85 REQ_FSEQ_POSTFLUSH,
86
87 /*
88 * If flush has been pending longer than the following timeout,
89 * it's issued even if flush_data requests are still in flight.
90 */
91 FLUSH_PENDING_TIMEOUT = 5 * HZ,
92};
93
94static bool blk_kick_flush(struct request_queue *q);
95
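/*
 * Map @rq and the queue's flush capabilities (@fflags) onto the REQ_FSEQ_*
 * steps the request has to go through: a request with data always gets
 * REQ_FSEQ_DATA, PREFLUSH is added when the request carries REQ_FLUSH on a
 * queue that advertises it, and POSTFLUSH is added when the request wants
 * REQ_FUA on such a queue but the device cannot do FUA natively.
 */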
96static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
97{
98 unsigned int policy = 0;
99
100 if (blk_rq_sectors(rq))
101 policy |= REQ_FSEQ_DATA;
102
103 if (fflags & REQ_FLUSH) {
104 if (rq->cmd_flags & REQ_FLUSH)
105 policy |= REQ_FSEQ_PREFLUSH;
106 if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
107 policy |= REQ_FSEQ_POSTFLUSH;
108 }
109 return policy;
110}
111
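/*
 * The next step for @rq is the lowest REQ_FSEQ_* bit not yet recorded in
 * rq->flush.seq (ffz() finds the first zero bit), so the sequence always
 * advances PREFLUSH -> DATA -> POSTFLUSH -> DONE, skipping steps that were
 * never required.
 */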
112static unsigned int blk_flush_cur_seq(struct request *rq)
113{
114 return 1 << ffz(rq->flush.seq);
115}
116
117static void blk_flush_restore_request(struct request *rq)
118{
119 /*
120 * After flush data completion, @rq->bio is %NULL but we need to
121 * complete the bio again. @rq->biotail is guaranteed to equal the
122 * original @rq->bio. Restore it.
123 */
124 rq->bio = rq->biotail;
125
126 /* make @rq a normal request */
127 rq->cmd_flags &= ~REQ_FLUSH_SEQ;
128 rq->end_io = rq->flush.saved_end_io;
129
130 blk_clear_rq_complete(rq);
131}
132
133static void mq_flush_run(struct work_struct *work)
134{
135 struct request *rq;
136
137 rq = container_of(work, struct request, mq_flush_work);
138
139 memset(&rq->csd, 0, sizeof(rq->csd));
140 blk_mq_insert_request(rq, false, true, false);
141}
142
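/*
 * Hand a sequenced request (or the flush request itself) to the dispatch
 * machinery.  For blk-mq the insertion is deferred to a work item; for the
 * legacy path the request is put directly on q->queue_head.  The return
 * value tells the caller whether the dispatch queue was touched and may
 * therefore need a kick.
 */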
143static bool blk_flush_queue_rq(struct request *rq, bool add_front)
144{
145 if (rq->q->mq_ops) {
146 INIT_WORK(&rq->mq_flush_work, mq_flush_run);
147 kblockd_schedule_work(rq->q, &rq->mq_flush_work);
148 return false;
149 } else {
150 if (add_front)
151 list_add(&rq->queuelist, &rq->q->queue_head);
152 else
153 list_add_tail(&rq->queuelist, &rq->q->queue_head);
154 return true;
155 }
156}
157
158/**
159 * blk_flush_complete_seq - complete flush sequence
160 * @rq: FLUSH/FUA request being sequenced
161 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
162 * @error: whether an error occurred
163 *
164 * @rq just completed @seq part of its flush sequence, record the
165 * completion and trigger the next step.
166 *
167 * CONTEXT:
168 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
169 *
170 * RETURNS:
171 * %true if requests were added to the dispatch queue, %false otherwise.
172 */
173static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
174 int error)
175{
176 struct request_queue *q = rq->q;
177 struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
178 bool queued = false, kicked;
179
180 BUG_ON(rq->flush.seq & seq);
181 rq->flush.seq |= seq;
182
183 if (likely(!error))
184 seq = blk_flush_cur_seq(rq);
185 else
186 seq = REQ_FSEQ_DONE;
187
188 switch (seq) {
189 case REQ_FSEQ_PREFLUSH:
190 case REQ_FSEQ_POSTFLUSH:
191 /* queue for flush */
192 if (list_empty(pending))
193 q->flush_pending_since = jiffies;
194 list_move_tail(&rq->flush.list, pending);
195 break;
196
197 case REQ_FSEQ_DATA:
198 list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
199 queued = blk_flush_queue_rq(rq, true);
200 break;
201
202 case REQ_FSEQ_DONE:
203 /*
 204 * @rq was previously adjusted by blk_insert_flush() for
205 * flush sequencing and may already have gone through the
206 * flush data request completion path. Restore @rq for
207 * normal completion and end it.
208 */
209 BUG_ON(!list_empty(&rq->queuelist));
210 list_del_init(&rq->flush.list);
211 blk_flush_restore_request(rq);
212 if (q->mq_ops)
213 blk_mq_end_io(rq, error);
214 else
215 __blk_end_request_all(rq, error);
216 break;
217
218 default:
219 BUG();
220 }
221
222 kicked = blk_kick_flush(q);
223 return kicked | queued;
224}
225
226static void flush_end_io(struct request *flush_rq, int error)
227{
228 struct request_queue *q = flush_rq->q;
229 struct list_head *running;
230 bool queued = false;
231 struct request *rq, *n;
232 unsigned long flags = 0;
233
234 if (q->mq_ops)
235 spin_lock_irqsave(&q->mq_flush_lock, flags);
236
237 running = &q->flush_queue[q->flush_running_idx];
238 BUG_ON(q->flush_pending_idx == q->flush_running_idx);
239
240 /* account completion of the flush request */
241 q->flush_running_idx ^= 1;
242
243 if (!q->mq_ops)
244 elv_completed_request(q, flush_rq);
245
246 /* and push the waiting requests to the next stage */
247 list_for_each_entry_safe(rq, n, running, flush.list) {
248 unsigned int seq = blk_flush_cur_seq(rq);
249
250 BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
251 queued |= blk_flush_complete_seq(rq, seq, error);
252 }
253
254 /*
 255 * Kick the queue to avoid a stall in two cases:
 256 * 1. Moving a request silently to an empty queue_head may stall the
 257 * queue.
 258 * 2. When a flush request is running on a non-queueable queue, the
 259 * queue is held. Restart the queue after the flush request finishes
 260 * to avoid a stall.
 261 * This function is called from the request completion path and calling
 262 * directly into request_fn may confuse the driver. Always use
 263 * kblockd.
264 */
265 if (queued || q->flush_queue_delayed) {
266 WARN_ON(q->mq_ops);
267 blk_run_queue_async(q);
268 }
269 q->flush_queue_delayed = 0;
270 if (q->mq_ops)
271 spin_unlock_irqrestore(&q->mq_flush_lock, flags);
272}
273
274/**
275 * blk_kick_flush - consider issuing flush request
276 * @q: request_queue being kicked
277 *
278 * Flush related states of @q have changed, consider issuing flush request.
279 * Please read the comment at the top of this file for more info.
280 *
281 * CONTEXT:
282 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
283 *
284 * RETURNS:
285 * %true if flush was issued, %false otherwise.
286 */
287static bool blk_kick_flush(struct request_queue *q)
288{
289 struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
290 struct request *first_rq =
291 list_first_entry(pending, struct request, flush.list);
292
293 /* C1 described at the top of this file */
294 if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
295 return false;
296
297 /* C2 and C3 */
298 if (!list_empty(&q->flush_data_in_flight) &&
299 time_before(jiffies,
300 q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
301 return false;
302
303 /*
304 * Issue flush and toggle pending_idx. This makes pending_idx
305 * different from running_idx, which means flush is in flight.
306 */
307 q->flush_pending_idx ^= 1;
308
309 if (q->mq_ops) {
310 struct blk_mq_ctx *ctx = first_rq->mq_ctx;
311 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
312
313 blk_mq_rq_init(hctx, q->flush_rq);
314 q->flush_rq->mq_ctx = ctx;
315
316 /*
 317 * Reuse the tag value from the first waiting request;
 318 * with blk-mq the tag is generated during request
319 * allocation and drivers can rely on it being inside
320 * the range they asked for.
321 */
322 q->flush_rq->tag = first_rq->tag;
323 } else {
324 blk_rq_init(q, q->flush_rq);
325 }
326
327 q->flush_rq->cmd_type = REQ_TYPE_FS;
328 q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
329 q->flush_rq->rq_disk = first_rq->rq_disk;
330 q->flush_rq->end_io = flush_end_io;
331
332 return blk_flush_queue_rq(q->flush_rq, false);
333}
334
335static void flush_data_end_io(struct request *rq, int error)
336{
337 struct request_queue *q = rq->q;
338
339 /*
340 * After populating an empty queue, kick it to avoid stall. Read
341 * the comment in flush_end_io().
342 */
343 if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
344 blk_run_queue_async(q);
345}
346
347static void mq_flush_data_end_io(struct request *rq, int error)
348{
349 struct request_queue *q = rq->q;
350 struct blk_mq_hw_ctx *hctx;
351 struct blk_mq_ctx *ctx;
352 unsigned long flags;
353
354 ctx = rq->mq_ctx;
355 hctx = q->mq_ops->map_queue(q, ctx->cpu);
356
357 /*
358 * After populating an empty queue, kick it to avoid stall. Read
359 * the comment in flush_end_io().
360 */
361 spin_lock_irqsave(&q->mq_flush_lock, flags);
362 if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
363 blk_mq_run_hw_queue(hctx, true);
364 spin_unlock_irqrestore(&q->mq_flush_lock, flags);
365}
366
367/**
368 * blk_insert_flush - insert a new FLUSH/FUA request
369 * @rq: request to insert
370 *
 371 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 372 * or from __blk_mq_run_hw_queue() to dispatch a request.
373 * @rq is being submitted. Analyze what needs to be done and put it on the
374 * right queue.
375 *
376 * CONTEXT:
377 * spin_lock_irq(q->queue_lock) in !mq case
378 */
379void blk_insert_flush(struct request *rq)
380{
381 struct request_queue *q = rq->q;
382 unsigned int fflags = q->flush_flags; /* may change, cache */
383 unsigned int policy = blk_flush_policy(fflags, rq);
384
385 /*
386 * @policy now records what operations need to be done. Adjust
387 * REQ_FLUSH and FUA for the driver.
388 */
389 rq->cmd_flags &= ~REQ_FLUSH;
390 if (!(fflags & REQ_FUA))
391 rq->cmd_flags &= ~REQ_FUA;
392
393 /*
394 * An empty flush handed down from a stacking driver may
395 * translate into nothing if the underlying device does not
396 * advertise a write-back cache. In this case, simply
397 * complete the request.
398 */
399 if (!policy) {
400 if (q->mq_ops)
401 blk_mq_end_io(rq, 0);
402 else
403 __blk_end_bidi_request(rq, 0, 0, 0);
404 return;
405 }
406
 407	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
408
409 /*
410 * If there's data but flush is not necessary, the request can be
411 * processed directly without going through flush machinery. Queue
412 * for normal execution.
413 */
414 if ((policy & REQ_FSEQ_DATA) &&
415 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
416 if (q->mq_ops) {
417 blk_mq_insert_request(rq, false, false, true);
418 } else
419 list_add_tail(&rq->queuelist, &q->queue_head);
420 return;
421 }
422
423 /*
424 * @rq should go through flush machinery. Mark it part of flush
425 * sequence and submit for further processing.
426 */
427 memset(&rq->flush, 0, sizeof(rq->flush));
428 INIT_LIST_HEAD(&rq->flush.list);
429 rq->cmd_flags |= REQ_FLUSH_SEQ;
430 rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
431 if (q->mq_ops) {
432 rq->end_io = mq_flush_data_end_io;
433
434 spin_lock_irq(&q->mq_flush_lock);
435 blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
436 spin_unlock_irq(&q->mq_flush_lock);
437 return;
438 }
439 rq->end_io = flush_data_end_io;
440
441 blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
442}
443
444/**
445 * blk_abort_flushes - @q is being aborted, abort flush requests
446 * @q: request_queue being aborted
447 *
448 * To be called from elv_abort_queue(). @q is being aborted. Prepare all
449 * FLUSH/FUA requests for abortion.
450 *
451 * CONTEXT:
452 * spin_lock_irq(q->queue_lock)
453 */
454void blk_abort_flushes(struct request_queue *q)
455{
456 struct request *rq, *n;
457 int i;
458
459 /*
460 * Requests in flight for data are already owned by the dispatch
461 * queue or the device driver. Just restore for normal completion.
462 */
463 list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
464 list_del_init(&rq->flush.list);
465 blk_flush_restore_request(rq);
466 }
467
468 /*
469 * We need to give away requests on flush queues. Restore for
470 * normal completion and put them on the dispatch queue.
471 */
472 for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
473 list_for_each_entry_safe(rq, n, &q->flush_queue[i],
474 flush.list) {
475 list_del_init(&rq->flush.list);
476 blk_flush_restore_request(rq);
477 list_add_tail(&rq->queuelist, &q->queue_head);
478 }
479 }
480}
481
482/**
483 * blkdev_issue_flush - queue a flush
484 * @bdev: blockdev to issue flush for
485 * @gfp_mask: memory allocation flags (for bio_alloc)
486 * @error_sector: error sector
487 *
488 * Description:
489 * Issue a flush for the block device in question. Caller can supply
490 * room for storing the error offset in case of a flush error, if they
 491 * wish to. The flush is submitted with submit_bio_wait(), so the call
 492 * returns only after the flush request has completed.
493 */
494int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
495 sector_t *error_sector)
496{
497 struct request_queue *q;
498 struct bio *bio;
499 int ret = 0;
500
501 if (bdev->bd_disk == NULL)
502 return -ENXIO;
503
504 q = bdev_get_queue(bdev);
505 if (!q)
506 return -ENXIO;
507
508 /*
509 * some block devices may not have their queue correctly set up here
510 * (e.g. loop device without a backing file) and so issuing a flush
511 * here will panic. Ensure there is a request function before issuing
512 * the flush.
513 */
514 if (!q->make_request_fn)
515 return -ENXIO;
516
517 bio = bio_alloc(gfp_mask, 0);
518 bio->bi_bdev = bdev;
519
520 ret = submit_bio_wait(WRITE_FLUSH, bio);
521
522 /*
523 * The driver must store the error location in ->bi_sector, if
524 * it supports it. For non-stacked drivers, this should be
525 * copied from blk_rq_pos(rq).
526 */
527 if (error_sector)
528 *error_sector = bio->bi_iter.bi_sector;
529
530 bio_put(bio);
531 return ret;
532}
533EXPORT_SYMBOL(blkdev_issue_flush);
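
/*
 * Typical use (illustrative only): a caller that has finished submitting
 * ordinary writes can issue
 *
 *	blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *
 * to drain the device's volatile write cache before treating the data as
 * durable; a non-NULL last argument additionally asks for the error sector
 * on failure.
 */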
534
535void blk_mq_init_flush(struct request_queue *q)
536{
537 spin_lock_init(&q->mq_flush_lock);
538}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Functions to sequence PREFLUSH and FUA writes.
4 *
5 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
6 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
7 *
 8 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of three
9 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
10 * properties and hardware capability.
11 *
12 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
13 * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
14 * that the device cache should be flushed before the data is executed, and
15 * REQ_FUA means that the data must be on non-volatile media on request
16 * completion.
17 *
 18 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 19 * any difference. The requests are either completed immediately if there's
 20 * no data or executed as normal requests otherwise.
 21 *
 22 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 23 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 24 *
 25 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 26 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
27 *
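 * As an illustration of the above rules: a REQ_PREFLUSH|REQ_FUA write with
 * data is sequenced as PREFLUSH -> DATA -> POSTFLUSH on a device with a
 * writeback cache but no FUA, as PREFLUSH -> DATA (REQ_FUA retained) on a
 * FUA-capable device, and is executed as a plain write when the device has
 * no writeback cache at all.
 *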
28 * The actual execution of flush is double buffered. Whenever a request
29 * needs to execute PRE or POSTFLUSH, it queues at
30 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
31 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
 32 * completes, all the requests which were pending proceed to the next
33 * step. This allows arbitrary merging of different types of PREFLUSH/FUA
34 * requests.
35 *
36 * Currently, the following conditions are used to determine when to issue
37 * flush.
38 *
39 * C1. At any given time, only one flush shall be in progress. This makes
40 * double buffering sufficient.
41 *
42 * C2. Flush is deferred if any request is executing DATA of its sequence.
43 * This avoids issuing separate POSTFLUSHes for requests which shared
44 * PREFLUSH.
45 *
46 * C3. The second condition is ignored if there is a request which has
47 * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 48 * starvation in the unlikely case where there is a continuous stream of
49 * FUA (without PREFLUSH) requests.
50 *
51 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
52 * is beneficial.
53 *
54 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
55 * Once while executing DATA and again after the whole sequence is
56 * complete. The first completion updates the contained bio but doesn't
57 * finish it so that the bio submitter is notified only after the whole
58 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
59 * req_bio_endio().
60 *
61 * The above peculiarity requires that each PREFLUSH/FUA request has only one
62 * bio attached to it, which is guaranteed as they aren't allowed to be
63 * merged in the usual way.
64 */
65
66#include <linux/kernel.h>
67#include <linux/module.h>
68#include <linux/bio.h>
69#include <linux/blkdev.h>
70#include <linux/gfp.h>
71#include <linux/blk-mq.h>
72#include <linux/lockdep.h>
73
74#include "blk.h"
75#include "blk-mq.h"
76#include "blk-mq-tag.h"
77#include "blk-mq-sched.h"
78
79/* PREFLUSH/FUA sequences */
80enum {
81 REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
82 REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
83 REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
84 REQ_FSEQ_DONE = (1 << 3),
85
86 REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
87 REQ_FSEQ_POSTFLUSH,
88
89 /*
90 * If flush has been pending longer than the following timeout,
91 * it's issued even if flush_data requests are still in flight.
92 */
93 FLUSH_PENDING_TIMEOUT = 5 * HZ,
94};
95
96static void blk_kick_flush(struct request_queue *q,
97 struct blk_flush_queue *fq, unsigned int flags);
98
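/*
 * Map @rq and the queue flags snapshot in @fflags onto the REQ_FSEQ_* steps
 * the request has to go through: REQ_FSEQ_DATA whenever the request carries
 * data, REQ_FSEQ_PREFLUSH when the request asks for REQ_PREFLUSH on a queue
 * with a volatile cache (QUEUE_FLAG_WC), and REQ_FSEQ_POSTFLUSH when, on
 * such a queue, the request wants REQ_FUA but QUEUE_FLAG_FUA is not set.
 */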
99static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
100{
101 unsigned int policy = 0;
102
103 if (blk_rq_sectors(rq))
104 policy |= REQ_FSEQ_DATA;
105
106 if (fflags & (1UL << QUEUE_FLAG_WC)) {
107 if (rq->cmd_flags & REQ_PREFLUSH)
108 policy |= REQ_FSEQ_PREFLUSH;
109 if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
110 (rq->cmd_flags & REQ_FUA))
111 policy |= REQ_FSEQ_POSTFLUSH;
112 }
113 return policy;
114}
115
116static unsigned int blk_flush_cur_seq(struct request *rq)
117{
118 return 1 << ffz(rq->flush.seq);
119}
120
121static void blk_flush_restore_request(struct request *rq)
122{
123 /*
124 * After flush data completion, @rq->bio is %NULL but we need to
125 * complete the bio again. @rq->biotail is guaranteed to equal the
126 * original @rq->bio. Restore it.
127 */
128 rq->bio = rq->biotail;
129
130 /* make @rq a normal request */
131 rq->rq_flags &= ~RQF_FLUSH_SEQ;
132 rq->end_io = rq->flush.saved_end_io;
133}
134
135static void blk_flush_queue_rq(struct request *rq, bool add_front)
136{
137 blk_mq_add_to_requeue_list(rq, add_front, true);
138}
139
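/*
 * Account the completed flush request against part0 of the disk: one more
 * flush I/O and the wall-clock time it spent since rq->start_time_ns, kept
 * in the STAT_FLUSH bucket of the per-partition statistics.
 */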
140static void blk_account_io_flush(struct request *rq)
141{
142 struct hd_struct *part = &rq->rq_disk->part0;
143
144 part_stat_lock();
145 part_stat_inc(part, ios[STAT_FLUSH]);
146 part_stat_add(part, nsecs[STAT_FLUSH],
147 ktime_get_ns() - rq->start_time_ns);
148 part_stat_unlock();
149}
150
151/**
152 * blk_flush_complete_seq - complete flush sequence
153 * @rq: PREFLUSH/FUA request being sequenced
154 * @fq: flush queue
155 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
156 * @error: whether an error occurred
157 *
158 * @rq just completed @seq part of its flush sequence, record the
159 * completion and trigger the next step.
160 *
161 * CONTEXT:
162 * spin_lock_irq(fq->mq_flush_lock)
163 */
164static void blk_flush_complete_seq(struct request *rq,
165 struct blk_flush_queue *fq,
166 unsigned int seq, blk_status_t error)
167{
168 struct request_queue *q = rq->q;
169 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
170 unsigned int cmd_flags;
171
172 BUG_ON(rq->flush.seq & seq);
173 rq->flush.seq |= seq;
174 cmd_flags = rq->cmd_flags;
175
176 if (likely(!error))
177 seq = blk_flush_cur_seq(rq);
178 else
179 seq = REQ_FSEQ_DONE;
180
181 switch (seq) {
182 case REQ_FSEQ_PREFLUSH:
183 case REQ_FSEQ_POSTFLUSH:
184 /* queue for flush */
185 if (list_empty(pending))
186 fq->flush_pending_since = jiffies;
187 list_move_tail(&rq->flush.list, pending);
188 break;
189
190 case REQ_FSEQ_DATA:
191 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
192 blk_flush_queue_rq(rq, true);
193 break;
194
195 case REQ_FSEQ_DONE:
196 /*
197 * @rq was previously adjusted by blk_insert_flush() for
198 * flush sequencing and may already have gone through the
199 * flush data request completion path. Restore @rq for
200 * normal completion and end it.
201 */
202 BUG_ON(!list_empty(&rq->queuelist));
203 list_del_init(&rq->flush.list);
204 blk_flush_restore_request(rq);
205 blk_mq_end_request(rq, error);
206 break;
207
208 default:
209 BUG();
210 }
211
212 blk_kick_flush(q, fq, cmd_flags);
213}
214
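/*
 * Completion handler for the flush request itself: account the flush, then,
 * under fq->mq_flush_lock, drop a reference (stashing the error and bailing
 * out if the request is still referenced), give back the borrowed driver or
 * scheduler tag, toggle flush_running_idx and advance every request that
 * was waiting on this flush to its next sequence step.
 */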
215static void flush_end_io(struct request *flush_rq, blk_status_t error)
216{
217 struct request_queue *q = flush_rq->q;
218 struct list_head *running;
219 struct request *rq, *n;
220 unsigned long flags = 0;
221 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
222
223 blk_account_io_flush(flush_rq);
224
 225	/* release the tag's ownership back to the request it was cloned from */
226 spin_lock_irqsave(&fq->mq_flush_lock, flags);
227
228 if (!refcount_dec_and_test(&flush_rq->ref)) {
229 fq->rq_status = error;
230 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
231 return;
232 }
233
234 if (fq->rq_status != BLK_STS_OK)
235 error = fq->rq_status;
236
237 if (!q->elevator) {
238 flush_rq->tag = BLK_MQ_NO_TAG;
239 } else {
240 blk_mq_put_driver_tag(flush_rq);
241 flush_rq->internal_tag = BLK_MQ_NO_TAG;
242 }
243
244 running = &fq->flush_queue[fq->flush_running_idx];
245 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
246
247 /* account completion of the flush request */
248 fq->flush_running_idx ^= 1;
249
250 /* and push the waiting requests to the next stage */
251 list_for_each_entry_safe(rq, n, running, flush.list) {
252 unsigned int seq = blk_flush_cur_seq(rq);
253
254 BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
255 blk_flush_complete_seq(rq, fq, seq, error);
256 }
257
258 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
259}
260
261/**
262 * blk_kick_flush - consider issuing flush request
263 * @q: request_queue being kicked
264 * @fq: flush queue
265 * @flags: cmd_flags of the original request
266 *
267 * Flush related states of @q have changed, consider issuing flush request.
268 * Please read the comment at the top of this file for more info.
269 *
270 * CONTEXT:
271 * spin_lock_irq(fq->mq_flush_lock)
272 *
273 */
274static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
275 unsigned int flags)
276{
277 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
278 struct request *first_rq =
279 list_first_entry(pending, struct request, flush.list);
280 struct request *flush_rq = fq->flush_rq;
281
282 /* C1 described at the top of this file */
283 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
284 return;
285
286 /* C2 and C3 */
287 if (!list_empty(&fq->flush_data_in_flight) &&
288 time_before(jiffies,
289 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
290 return;
291
292 /*
293 * Issue flush and toggle pending_idx. This makes pending_idx
294 * different from running_idx, which means flush is in flight.
295 */
296 fq->flush_pending_idx ^= 1;
297
298 blk_rq_init(q, flush_rq);
299
300 /*
 301	 * When no I/O scheduler is in use, borrow the tag from the first request
 302	 * since they can't be in flight at the same time, and take over the
 303	 * tag's ownership for the flush request.
 304	 *
 305	 * When an I/O scheduler is in use, the flush request needs to borrow the
 306	 * scheduler tag just to keep the put/get driver tag accounting balanced.
307 */
308 flush_rq->mq_ctx = first_rq->mq_ctx;
309 flush_rq->mq_hctx = first_rq->mq_hctx;
310
311 if (!q->elevator) {
312 flush_rq->tag = first_rq->tag;
313
314 /*
 315		 * We borrow the data request's driver tag, so we have to mark
 316		 * this flush request as INFLIGHT to avoid double accounting
 317		 * of this driver tag.
318 */
319 flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
320 } else
321 flush_rq->internal_tag = first_rq->internal_tag;
322
323 flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
324 flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
325 flush_rq->rq_flags |= RQF_FLUSH_SEQ;
326 flush_rq->rq_disk = first_rq->rq_disk;
327 flush_rq->end_io = flush_end_io;
328
329 blk_flush_queue_rq(flush_rq, false);
330}
331
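/*
 * Completion handler for the DATA step of a sequenced request: release the
 * driver tag when an I/O scheduler is in use, record the completed step
 * under fq->mq_flush_lock so the flush machinery can move on, and then call
 * blk_mq_sched_restart() on the hardware queue so dispatch makes progress.
 */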
332static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
333{
334 struct request_queue *q = rq->q;
335 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
336 struct blk_mq_ctx *ctx = rq->mq_ctx;
337 unsigned long flags;
338 struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
339
340 if (q->elevator) {
341 WARN_ON(rq->tag < 0);
342 blk_mq_put_driver_tag(rq);
343 }
344
345 /*
346 * After populating an empty queue, kick it to avoid stall. Read
347 * the comment in flush_end_io().
348 */
349 spin_lock_irqsave(&fq->mq_flush_lock, flags);
350 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
351 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
352
353 blk_mq_sched_restart(hctx);
354}
355
356/**
357 * blk_insert_flush - insert a new PREFLUSH/FUA request
358 * @rq: request to insert
359 *
 360 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 361 * or from __blk_mq_run_hw_queue() to dispatch a request.
362 * @rq is being submitted. Analyze what needs to be done and put it on the
363 * right queue.
364 */
365void blk_insert_flush(struct request *rq)
366{
367 struct request_queue *q = rq->q;
368 unsigned long fflags = q->queue_flags; /* may change, cache */
369 unsigned int policy = blk_flush_policy(fflags, rq);
370 struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
371
372 /*
373 * @policy now records what operations need to be done. Adjust
374 * REQ_PREFLUSH and FUA for the driver.
375 */
376 rq->cmd_flags &= ~REQ_PREFLUSH;
377 if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
378 rq->cmd_flags &= ~REQ_FUA;
379
380 /*
381 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
382 * of those flags, we have to set REQ_SYNC to avoid skewing
383 * the request accounting.
384 */
385 rq->cmd_flags |= REQ_SYNC;
386
387 /*
388 * An empty flush handed down from a stacking driver may
389 * translate into nothing if the underlying device does not
390 * advertise a write-back cache. In this case, simply
391 * complete the request.
392 */
393 if (!policy) {
394 blk_mq_end_request(rq, 0);
395 return;
396 }
397
 398	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
399
400 /*
401 * If there's data but flush is not necessary, the request can be
402 * processed directly without going through flush machinery. Queue
403 * for normal execution.
404 */
405 if ((policy & REQ_FSEQ_DATA) &&
406 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
407 blk_mq_request_bypass_insert(rq, false, false);
408 return;
409 }
410
411 /*
412 * @rq should go through flush machinery. Mark it part of flush
413 * sequence and submit for further processing.
414 */
415 memset(&rq->flush, 0, sizeof(rq->flush));
416 INIT_LIST_HEAD(&rq->flush.list);
417 rq->rq_flags |= RQF_FLUSH_SEQ;
418 rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
419
420 rq->end_io = mq_flush_data_end_io;
421
422 spin_lock_irq(&fq->mq_flush_lock);
423 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
424 spin_unlock_irq(&fq->mq_flush_lock);
425}
426
427/**
428 * blkdev_issue_flush - queue a flush
429 * @bdev: blockdev to issue flush for
430 * @gfp_mask: memory allocation flags (for bio_alloc)
431 *
432 * Description:
433 * Issue a flush for the block device in question.
434 */
435int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
436{
437 struct bio *bio;
438 int ret = 0;
439
440 bio = bio_alloc(gfp_mask, 0);
441 bio_set_dev(bio, bdev);
442 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
443
444 ret = submit_bio_wait(bio);
445 bio_put(bio);
446 return ret;
447}
448EXPORT_SYMBOL(blkdev_issue_flush);
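
/*
 * Typical use (illustrative only): a caller that has finished submitting
 * ordinary writes can issue
 *
 *	blkdev_issue_flush(bdev, GFP_KERNEL);
 *
 * to wait for the device's volatile write cache to be drained before
 * treating the data as durable.
 */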
449
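/*
 * Allocate a blk_flush_queue: the two pending/running flush lists, the
 * data-in-flight list, and one preallocated flush_rq whose size is padded
 * by the driver's cmd_size and rounded up to a cache line.  A separate
 * lockdep key is registered for mq_flush_lock so different flush queues
 * get independent lock classes.
 */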
450struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
451 gfp_t flags)
452{
453 struct blk_flush_queue *fq;
454 int rq_sz = sizeof(struct request);
455
456 fq = kzalloc_node(sizeof(*fq), flags, node);
457 if (!fq)
458 goto fail;
459
460 spin_lock_init(&fq->mq_flush_lock);
461
462 rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
463 fq->flush_rq = kzalloc_node(rq_sz, flags, node);
464 if (!fq->flush_rq)
465 goto fail_rq;
466
467 INIT_LIST_HEAD(&fq->flush_queue[0]);
468 INIT_LIST_HEAD(&fq->flush_queue[1]);
469 INIT_LIST_HEAD(&fq->flush_data_in_flight);
470
471 lockdep_register_key(&fq->key);
472 lockdep_set_class(&fq->mq_flush_lock, &fq->key);
473
474 return fq;
475
476 fail_rq:
477 kfree(fq);
478 fail:
479 return NULL;
480}
481
482void blk_free_flush_queue(struct blk_flush_queue *fq)
483{
 484	/* a bio-based request queue has no flush queue */
485 if (!fq)
486 return;
487
488 lockdep_unregister_key(&fq->key);
489 kfree(fq->flush_rq);
490 kfree(fq);
491}