/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
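
/*
 * Example (illustrative, not part of the original comment): a data write
 * carrying REQ_FLUSH|REQ_FUA becomes PREFLUSH -> DATA -> POSTFLUSH on a
 * writeback-cache device without FUA, PREFLUSH -> DATA (with REQ_FUA
 * passed through) on a FUA-capable device, and a plain write on a device
 * with no writeback cache.
 */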

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
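
/*
 * Example (illustrative): with @fflags advertising REQ_FLUSH but not
 * REQ_FUA, a data write carrying REQ_FLUSH|REQ_FUA yields
 * REQ_FSEQ_DATA | REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH, while the
 * same write on a FUA-capable queue omits REQ_FSEQ_POSTFLUSH.
 */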

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
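
/*
 * Example (illustrative): rq->flush.seq accumulates completed steps, so
 * ffz() locates the next one: seq == 0 selects REQ_FSEQ_PREFLUSH,
 * seq == REQ_FSEQ_PREFLUSH selects REQ_FSEQ_DATA, and
 * seq == (REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA) selects REQ_FSEQ_POSTFLUSH.
 */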

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the first request since the two can't be
	 * in flight at the same time, and acquire the tag's ownership
	 * for the flush request.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() when dispatching requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_insert_request(rq, false, false, true);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  The flush is submitted and waited upon before
 *    returning.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
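
/*
 * Usage sketch (illustrative caller, not from this file):
 *
 *	if (blkdev_issue_flush(bdev, GFP_KERNEL, NULL))
 *		pr_warn("cache flush failed\n");
 *
 * A zero return means the flush completed; a negative errno reports a
 * failure to issue or complete it.
 */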

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues have no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH
 * indicates that the device cache should be flushed before the data is
 * executed, and REQ_FUA means that the data must be on non-volatile media
 * on request completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't
 * make any difference.  The requests are either completed immediately if
 * there's no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA,
 * REQ_PREFLUSH is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only
 * one bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
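
/*
 * Illustrative walkthrough (not part of the original comment): with
 * flush_pending_idx == flush_running_idx == 0, requests needing a flush
 * collect on flush_queue[0].  When blk_kick_flush() issues the flush it
 * toggles flush_pending_idx to 1, so new arrivals queue on
 * flush_queue[1] while flush_end_io() later walks flush_queue[0]; the
 * single shared flush covers everything that was pending on it.
 */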

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, blk_opf_t flags);

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;
	if (rq->bio)
		rq->__sector = rq->bio->bi_iter.bi_sector;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->q->disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      blk_time_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	blk_opf_t cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_add_tail(&rq->queuelist, pending);
		break;

	case REQ_FSEQ_DATA:
		fq->flush_data_in_flight++;
		spin_lock(&q->requeue_lock);
		list_move(&rq->queuelist, &q->requeue_list);
		spin_unlock(&q->requeue_lock);
		blk_mq_kick_requeue_list(q);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		list_del_init(&rq->queuelist);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
				       blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!req_ref_put_and_test(flush_rq)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return RQ_END_IO_NONE;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * The flush request has to be marked as IDLE when it is really
	 * ended because its .end_io() is called from the timeout code
	 * path too, to avoid use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, queuelist) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		list_del_init(&rq->queuelist);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
	return RQ_END_IO_NONE;
}

bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   blk_opf_t flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, queuelist);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (fq->flush_data_in_flight &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Without an I/O scheduler, borrow the tag from the first request
	 * since the two can't be in flight at the same time, and acquire
	 * the tag's ownership for the flush request.
	 *
	 * With an I/O scheduler, the flush request needs to borrow the
	 * scheduler tag only so that the driver-tag put/get pair stays
	 * balanced.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator)
		flush_rq->tag = first_rq->tag;
	else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order the WRITE to ->end_io against the WRITE to rq->ref; the
	 * pairing barrier is the one implied by refcount_inc_not_zero()
	 * in blk_mq_find_and_get_req(), which orders the WRITE/READ of
	 * flush_rq->ref against the READ of flush_rq->end_io.
	 */
	smp_wmb();
	req_ref_set(flush_rq, 1);

	spin_lock(&q->requeue_lock);
	list_add_tail(&flush_rq->queuelist, &q->flush_list);
	spin_unlock(&q->requeue_lock);

	blk_mq_kick_requeue_list(q);
}

static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
					       blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	fq->flush_data_in_flight--;
	/*
	 * @rq->queuelist may have been corrupted by rq->rq_next reuse;
	 * re-initialize it before reusing it here.
	 */
	INIT_LIST_HEAD(&rq->queuelist);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
	return RQ_END_IO_NONE;
}

static void blk_rq_init_flush(struct request *rq)
{
	rq->flush.seq = 0;
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
	bool supports_fua = q->limits.features & BLK_FEAT_FUA;
	unsigned int policy = 0;

	/* PREFLUSH/FUA requests must never be merged */
	WARN_ON_ONCE(rq->bio != rq->biotail);

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	/*
	 * Check which flushes we need to sequence for this operation.
	 */
	if (blk_queue_write_cache(q)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
			policy |= REQ_FSEQ_POSTFLUSH;
	}

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!supports_fua)
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	switch (policy) {
	case 0:
		/*
		 * An empty flush handed down from a stacking driver may
		 * translate into nothing if the underlying device does not
		 * advertise a write-back cache.  In this case, simply
		 * complete the request.
		 */
		blk_mq_end_request(rq, 0);
		return true;
	case REQ_FSEQ_DATA:
		/*
		 * If there's data, but no flush is necessary, the request
		 * can be processed directly without going through flush
		 * machinery.  Queue for normal execution.
		 */
		return false;
	case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
		/*
		 * Initialize the flush fields and completion handler to
		 * trigger the post flush, and then just pass the command
		 * on.
		 */
		blk_rq_init_flush(rq);
		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
		spin_lock_irq(&fq->mq_flush_lock);
		fq->flush_data_in_flight++;
		spin_unlock_irq(&fq->mq_flush_lock);
		return false;
	default:
		/*
		 * Mark the request as part of a flush sequence and submit it
		 * for further processing to the flush state machine.
		 */
		blk_rq_init_flush(rq);
		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return true;
	}
}
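
/*
 * Worked example (illustrative, not from the original source): a data
 * write carrying REQ_FUA alone on a write-back cache device that lacks
 * FUA computes policy == (REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH); the
 * request is dispatched normally now, and the completion handler
 * installed by blk_rq_init_flush() triggers the POSTFLUSH.  On a
 * FUA-capable device the same write computes REQ_FSEQ_DATA only and
 * bypasses the state machine with REQ_FUA still set.
 */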

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
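
/*
 * Usage sketch (illustrative caller, not from this file): the flush is
 * performed synchronously, with the bio living on the caller's stack:
 *
 *	int err = blkdev_issue_flush(sb->s_bdev);
 *
 * A negative errno indicates the flush failed.
 */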

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues have no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to
 * avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  Such drivers need to assign a different
 * lock class for their fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill, and worse, it would introduce a horrible
 * boot delay because synchronize_rcu() is implied in
 * lockdep_unregister_key(), which is called for each hctx release.  SCSI
 * probing may synchronously create and destroy lots of MQ request_queues
 * for non-existent devices, and some robot test kernels always enable the
 * lockdep option.  It has been observed that more than half an hour is
 * taken during SCSI MQ probe with a per-fq lock class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
				   struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
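
/*
 * Usage sketch (illustrative, names hypothetical): a driver whose
 * completion path can re-enter flush_end_io() would give its hctxs a
 * dedicated class, e.g. from its init_hctx callback:
 *
 *	static struct lock_class_key my_fq_lock_key;
 *
 *	blk_mq_hctx_set_fq_lock_class(hctx, &my_fq_lock_key);
 */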