block/blk-flush.c (v3.5.6)
 
  1/*
  2 * Functions to sequence FLUSH and FUA writes.
  3 *
  4 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
  5 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
  6 *
  7 * This file is released under the GPLv2.
  8 *
  9 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 10 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 11 * properties and hardware capability.
 12 *
 13 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 14 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 15 * that the device cache should be flushed before the data is executed, and
 16 * REQ_FUA means that the data must be on non-volatile media on request
 17 * completion.
 18 *
 19 * If the device doesn't have writeback cache, FLUSH and FUA don't make any
 20 * difference.  The requests are either completed immediately if there's no
 21 * data or executed as normal requests otherwise.
 22 *
 23 * If the device has writeback cache and supports FUA, REQ_FLUSH is
 24 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 25 *
 26 * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
 27 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 28 *
 29 * The actual execution of flush is double buffered.  Whenever a request
 30 * needs to execute PRE or POSTFLUSH, it queues at
 31 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 32 * flush is issued and the pending_idx is toggled.  When the flush
 33 * completes, all the requests which were pending proceed to the next
 34 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 35 * requests.
 36 *
 37 * Currently, the following conditions are used to determine when to issue
 38 * flush.
 39 *
 40 * C1. At any given time, only one flush shall be in progress.  This makes
 41 *     double buffering sufficient.
 42 *
 43 * C2. Flush is deferred if any request is executing DATA of its sequence.
 44 *     This avoids issuing separate POSTFLUSHes for requests which shared
 45 *     PREFLUSH.
 46 *
 47 * C3. The second condition is ignored if there is a request which has
 48 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 49 *     starvation in the unlikely case where there is a continuous stream of
 50 *     FUA (without FLUSH) requests.
 51 *
 52 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 53 * is beneficial.
 54 *
 55 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 56 * Once while executing DATA and again after the whole sequence is
 57 * complete.  The first completion updates the contained bio but doesn't
 58 * finish it so that the bio submitter is notified only after the whole
 59 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 60 * req_bio_endio().
 61 *
 62 * The above peculiarity requires that each FLUSH/FUA request has only one
 63 * bio attached to it, which is guaranteed as they aren't allowed to be
 64 * merged in the usual way.
 65 */
 66
 67#include <linux/kernel.h>
 68#include <linux/module.h>
 69#include <linux/bio.h>
 70#include <linux/blkdev.h>
 71#include <linux/gfp.h>
 72
 73#include "blk.h"
 74
 75/* FLUSH/FUA sequences */
 76enum {
 77	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
 78	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
 79	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
 80	REQ_FSEQ_DONE		= (1 << 3),
 81
 82	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
 83				  REQ_FSEQ_POSTFLUSH,
 84
 85	/*
 86	 * If flush has been pending longer than the following timeout,
 87	 * it's issued even if flush_data requests are still in flight.
 88	 */
 89	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
 90};
 91
 92static bool blk_kick_flush(struct request_queue *q);
 93
 94static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
 95{
 96	unsigned int policy = 0;
 97
 98	if (blk_rq_sectors(rq))
 99		policy |= REQ_FSEQ_DATA;
100
101	if (fflags & REQ_FLUSH) {
102		if (rq->cmd_flags & REQ_FLUSH)
103			policy |= REQ_FSEQ_PREFLUSH;
104		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
105			policy |= REQ_FSEQ_POSTFLUSH;
106	}
107	return policy;
108}
109
110static unsigned int blk_flush_cur_seq(struct request *rq)
111{
112	return 1 << ffz(rq->flush.seq);
113}
114
115static void blk_flush_restore_request(struct request *rq)
116{
117	/*
118	 * After flush data completion, @rq->bio is %NULL but we need to
119	 * complete the bio again.  @rq->biotail is guaranteed to equal the
120	 * original @rq->bio.  Restore it.
121	 */
122	rq->bio = rq->biotail;
123
124	/* make @rq a normal request */
125	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
126	rq->end_io = rq->flush.saved_end_io;
127}
128
129/**
130 * blk_flush_complete_seq - complete flush sequence
131 * @rq: FLUSH/FUA request being sequenced
132 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
133 * @error: whether an error occurred
134 *
135 * @rq just completed @seq part of its flush sequence; record the
136 * completion and trigger the next step.
137 *
138 * CONTEXT:
139 * spin_lock_irq(q->queue_lock)
140 *
141 * RETURNS:
142 * %true if requests were added to the dispatch queue, %false otherwise.
143 */
144static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
145				   int error)
146{
147	struct request_queue *q = rq->q;
148	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
149	bool queued = false;
150
151	BUG_ON(rq->flush.seq & seq);
152	rq->flush.seq |= seq;
153
154	if (likely(!error))
155		seq = blk_flush_cur_seq(rq);
156	else
157		seq = REQ_FSEQ_DONE;
158
159	switch (seq) {
160	case REQ_FSEQ_PREFLUSH:
161	case REQ_FSEQ_POSTFLUSH:
162		/* queue for flush */
163		if (list_empty(pending))
164			q->flush_pending_since = jiffies;
165		list_move_tail(&rq->flush.list, pending);
166		break;
167
168	case REQ_FSEQ_DATA:
169		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
170		list_add(&rq->queuelist, &q->queue_head);
171		queued = true;
172		break;
173
174	case REQ_FSEQ_DONE:
175		/*
176		 * @rq was previously adjusted by blk_insert_flush() for
177		 * flush sequencing and may already have gone through the
178		 * flush data request completion path.  Restore @rq for
179		 * normal completion and end it.
180		 */
181		BUG_ON(!list_empty(&rq->queuelist));
182		list_del_init(&rq->flush.list);
183		blk_flush_restore_request(rq);
184		__blk_end_request_all(rq, error);
185		break;
186
187	default:
188		BUG();
189	}
190
191	return blk_kick_flush(q) | queued;
192}
193
194static void flush_end_io(struct request *flush_rq, int error)
195{
196	struct request_queue *q = flush_rq->q;
197	struct list_head *running = &q->flush_queue[q->flush_running_idx];
198	bool queued = false;
199	struct request *rq, *n;
200
201	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
202
203	/* account completion of the flush request */
204	q->flush_running_idx ^= 1;
205	elv_completed_request(q, flush_rq);
206
207	/* and push the waiting requests to the next stage */
208	list_for_each_entry_safe(rq, n, running, flush.list) {
209		unsigned int seq = blk_flush_cur_seq(rq);
210
211		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
212		queued |= blk_flush_complete_seq(rq, seq, error);
213	}
214
215	/*
216	 * Kick the queue to avoid a stall in two cases:
217	 * 1. Moving a request silently to an empty queue_head may stall
218	 * the queue.
219	 * 2. When a flush request is running in a non-queueable queue, the
220	 * queue is held.  Restart the queue after the flush request
221	 * finishes to avoid a stall.
222	 * This function is called from the request completion path and calling
223	 * directly into request_fn may confuse the driver.  Always use
224	 * kblockd.
225	 */
226	if (queued || q->flush_queue_delayed)
227		blk_run_queue_async(q);
228	q->flush_queue_delayed = 0;
229}
230
231/**
232 * blk_kick_flush - consider issuing flush request
233 * @q: request_queue being kicked
234 *
235 * Flush related states of @q have changed, consider issuing flush request.
236 * Please read the comment at the top of this file for more info.
237 *
238 * CONTEXT:
239 * spin_lock_irq(q->queue_lock)
240 *
241 * RETURNS:
242 * %true if flush was issued, %false otherwise.
243 */
244static bool blk_kick_flush(struct request_queue *q)
245{
246	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
247	struct request *first_rq =
248		list_first_entry(pending, struct request, flush.list);
249
250	/* C1 described at the top of this file */
251	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
252		return false;
253
254	/* C2 and C3 */
255	if (!list_empty(&q->flush_data_in_flight) &&
256	    time_before(jiffies,
257			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
258		return false;
259
260	/*
261	 * Issue flush and toggle pending_idx.  This makes pending_idx
262	 * different from running_idx, which means flush is in flight.
263	 */
264	blk_rq_init(q, &q->flush_rq);
265	q->flush_rq.cmd_type = REQ_TYPE_FS;
266	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
267	q->flush_rq.rq_disk = first_rq->rq_disk;
268	q->flush_rq.end_io = flush_end_io;
269
270	q->flush_pending_idx ^= 1;
271	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
272	return true;
273}
274
275static void flush_data_end_io(struct request *rq, int error)
276{
277	struct request_queue *q = rq->q;
278
279	/*
280	 * After populating an empty queue, kick it to avoid stall.  Read
281	 * the comment in flush_end_io().
282	 */
283	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
284		blk_run_queue_async(q);
285}
286
287/**
288 * blk_insert_flush - insert a new FLUSH/FUA request
289 * @rq: request to insert
290 *
291 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
292 * @rq is being submitted.  Analyze what needs to be done and put it on the
293 * right queue.
294 *
295 * CONTEXT:
296 * spin_lock_irq(q->queue_lock)
297 */
298void blk_insert_flush(struct request *rq)
299{
300	struct request_queue *q = rq->q;
301	unsigned int fflags = q->flush_flags;	/* may change, cache */
302	unsigned int policy = blk_flush_policy(fflags, rq);
303
304	/*
305	 * @policy now records what operations need to be done.  Adjust
306	 * REQ_FLUSH and FUA for the driver.
307	 */
308	rq->cmd_flags &= ~REQ_FLUSH;
309	if (!(fflags & REQ_FUA))
310		rq->cmd_flags &= ~REQ_FUA;
311
312	/*
313	 * An empty flush handed down from a stacking driver may
314	 * translate into nothing if the underlying device does not
315	 * advertise a write-back cache.  In this case, simply
316	 * complete the request.
317	 */
318	if (!policy) {
319		__blk_end_bidi_request(rq, 0, 0, 0);
320		return;
321	}
322
323	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
324
325	/*
326	 * If there's data but flush is not necessary, the request can be
327	 * processed directly without going through flush machinery.  Queue
328	 * for normal execution.
329	 */
330	if ((policy & REQ_FSEQ_DATA) &&
331	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
332		list_add_tail(&rq->queuelist, &q->queue_head);
333		return;
334	}
335
336	/*
337	 * @rq should go through flush machinery.  Mark it part of flush
338	 * sequence and submit for further processing.
339	 */
340	memset(&rq->flush, 0, sizeof(rq->flush));
341	INIT_LIST_HEAD(&rq->flush.list);
342	rq->cmd_flags |= REQ_FLUSH_SEQ;
343	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
344	rq->end_io = flush_data_end_io;
345
346	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
347}
348
349/**
350 * blk_abort_flushes - @q is being aborted, abort flush requests
351 * @q: request_queue being aborted
352 *
353 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
354 * FLUSH/FUA requests for abortion.
355 *
356 * CONTEXT:
357 * spin_lock_irq(q->queue_lock)
358 */
359void blk_abort_flushes(struct request_queue *q)
360{
361	struct request *rq, *n;
362	int i;
363
364	/*
365	 * Requests in flight for data are already owned by the dispatch
366	 * queue or the device driver.  Just restore for normal completion.
367	 */
368	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
369		list_del_init(&rq->flush.list);
370		blk_flush_restore_request(rq);
371	}
372
373	/*
374	 * We need to give away requests on flush queues.  Restore for
375	 * normal completion and put them on the dispatch queue.
376	 */
377	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
378		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
379					 flush.list) {
380			list_del_init(&rq->flush.list);
381			blk_flush_restore_request(rq);
382			list_add_tail(&rq->queuelist, &q->queue_head);
383		}
384	}
385}
386
387static void bio_end_flush(struct bio *bio, int err)
388{
389	if (err)
390		clear_bit(BIO_UPTODATE, &bio->bi_flags);
391	if (bio->bi_private)
392		complete(bio->bi_private);
393	bio_put(bio);
394}
395
396/**
397 * blkdev_issue_flush - queue a flush
398 * @bdev:	blockdev to issue flush for
399 * @gfp_mask:	memory allocation flags (for bio_alloc)
400 * @error_sector:	error sector
401 *
402 * Description:
403 *    Issue a flush for the block device in question. Caller can supply
404 *    room for storing the error offset in case of a flush error, if they
405 *    wish to.  If the WAIT flag is not passed, the caller may only check
406 *    that the request was pushed onto some internal queue for later handling.
407 */
408int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
409		sector_t *error_sector)
410{
411	DECLARE_COMPLETION_ONSTACK(wait);
412	struct request_queue *q;
413	struct bio *bio;
414	int ret = 0;
415
416	if (bdev->bd_disk == NULL)
417		return -ENXIO;
418
419	q = bdev_get_queue(bdev);
420	if (!q)
421		return -ENXIO;
422
423	/*
424	 * some block devices may not have their queue correctly set up here
425	 * (e.g. loop device without a backing file) and so issuing a flush
426	 * here will panic. Ensure there is a request function before issuing
427	 * the flush.
428	 */
429	if (!q->make_request_fn)
430		return -ENXIO;
431
432	bio = bio_alloc(gfp_mask, 0);
433	bio->bi_end_io = bio_end_flush;
434	bio->bi_bdev = bdev;
435	bio->bi_private = &wait;
436
437	bio_get(bio);
438	submit_bio(WRITE_FLUSH, bio);
439	wait_for_completion(&wait);
440
441	/*
442	 * The driver must store the error location in ->bi_sector, if
443	 * it supports it. For non-stacked drivers, this should be
444	 * copied from blk_rq_pos(rq).
445	 */
446	if (error_sector)
447		*error_sector = bio->bi_sector;
448
449	if (!bio_flagged(bio, BIO_UPTODATE))
450		ret = -EIO;
451
452	bio_put(bio);
453	return ret;
454}
455EXPORT_SYMBOL(blkdev_issue_flush);
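
To make the v3.5.6 sequencing concrete: blk_flush_policy() (lines 94-108 above) is a pure function of the device's flush capabilities (q->flush_flags) and the request's own flags, so its decision table can be modeled outside the kernel. The sketch below is an illustrative userspace program, not kernel code: the flag values are local stand-ins and main() is invented for demonstration, but the branch logic mirrors the listing.

#include <stdio.h>

/* Stand-in flag values; only their distinctness matters here.
 * In fflags they describe the device (writeback cache, native FUA);
 * in cmd_flags they describe the request. */
#define REQ_FLUSH          (1 << 0)
#define REQ_FUA            (1 << 1)

#define REQ_FSEQ_PREFLUSH  (1 << 0)
#define REQ_FSEQ_DATA      (1 << 1)
#define REQ_FSEQ_POSTFLUSH (1 << 2)

/* Model of blk_flush_policy(): which of the three optional steps
 * (PREFLUSH, DATA, POSTFLUSH) this request needs. */
static unsigned int flush_policy(unsigned int fflags,
				 unsigned int cmd_flags, int has_data)
{
	unsigned int policy = 0;

	if (has_data)
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		/* POSTFLUSH is only needed when the device lacks native FUA */
		if (!(fflags & REQ_FUA) && (cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

int main(void)
{
	/* Writeback cache without native FUA; a FLUSH|FUA write with data */
	unsigned int p = flush_policy(REQ_FLUSH, REQ_FLUSH | REQ_FUA, 1);

	printf("PREFLUSH=%d DATA=%d POSTFLUSH=%d\n",
	       !!(p & REQ_FSEQ_PREFLUSH), !!(p & REQ_FSEQ_DATA),
	       !!(p & REQ_FSEQ_POSTFLUSH));	/* 1 1 1: the full sequence */
	return 0;
}

On a device that also advertises native FUA (fflags = REQ_FLUSH | REQ_FUA), the same request decomposes to PREFLUSH and DATA only, with REQ_FUA passed down to the hardware, exactly as the header comment describes.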
block/blk-flush.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions to sequence PREFLUSH and FUA writes.
  4 *
  5 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
  6 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
  7 *
  8 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of three
  9 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 10 * properties and hardware capability.
 11 *
 12 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 13 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 14 * that the device cache should be flushed before the data is executed, and
 15 * REQ_FUA means that the data must be on non-volatile media on request
 16 * completion.
 17 *
 18 * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any
 19 * difference.  The requests are either completed immediately if there's no data
 20 * or executed as normal requests otherwise.
 21 *
 22 * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
 23 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 24 *
 25 * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
 26 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 27 *
 28 * The actual execution of flush is double buffered.  Whenever a request
 29 * needs to execute PRE or POSTFLUSH, it queues at
 30 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 31 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 32 * completes, all the requests which were pending proceed to the next
 33 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 34 * requests.
 35 *
 36 * Currently, the following conditions are used to determine when to issue
 37 * flush.
 38 *
 39 * C1. At any given time, only one flush shall be in progress.  This makes
 40 *     double buffering sufficient.
 41 *
 42 * C2. Flush is deferred if any request is executing DATA of its sequence.
 43 *     This avoids issuing separate POSTFLUSHes for requests which shared
 44 *     PREFLUSH.
 45 *
 46 * C3. The second condition is ignored if there is a request which has
 47 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 48 *     starvation in the unlikely case where there is a continuous stream of
 49 *     FUA (without PREFLUSH) requests.
 50 *
 51 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 52 * is beneficial.
 53 *
 54 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 55 * Once while executing DATA and again after the whole sequence is
 56 * complete.  The first completion updates the contained bio but doesn't
 57 * finish it so that the bio submitter is notified only after the whole
 58 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 59 * req_bio_endio().
 60 *
 61 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 62 * bio attached to it, which is guaranteed as they aren't allowed to be
 63 * merged in the usual way.
 64 */
 65
 66#include <linux/kernel.h>
 67#include <linux/module.h>
 68#include <linux/bio.h>
 69#include <linux/blkdev.h>
 70#include <linux/gfp.h>
 71#include <linux/part_stat.h>
 72
 73#include "blk.h"
 74#include "blk-mq.h"
 75#include "blk-mq-sched.h"
 76
 77/* PREFLUSH/FUA sequences */
 78enum {
 79	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
 80	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
 81	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
 82	REQ_FSEQ_DONE		= (1 << 3),
 83
 84	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
 85				  REQ_FSEQ_POSTFLUSH,
 86
 87	/*
 88	 * If flush has been pending longer than the following timeout,
 89	 * it's issued even if flush_data requests are still in flight.
 90	 */
 91	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
 92};
 93
 94static void blk_kick_flush(struct request_queue *q,
 95			   struct blk_flush_queue *fq, blk_opf_t flags);
 96
 97static inline struct blk_flush_queue *
 98blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 99{
100	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
101}
102
103static unsigned int blk_flush_cur_seq(struct request *rq)
104{
105	return 1 << ffz(rq->flush.seq);
106}
107
108static void blk_flush_restore_request(struct request *rq)
109{
110	/*
111	 * After flush data completion, @rq->bio is %NULL but we need to
112	 * complete the bio again.  @rq->biotail is guaranteed to equal the
113	 * original @rq->bio.  Restore it.
114	 */
115	rq->bio = rq->biotail;
116	if (rq->bio)
117		rq->__sector = rq->bio->bi_iter.bi_sector;
118
119	/* make @rq a normal request */
120	rq->rq_flags &= ~RQF_FLUSH_SEQ;
121	rq->end_io = rq->flush.saved_end_io;
122}
123
124static void blk_account_io_flush(struct request *rq)
125{
126	struct block_device *part = rq->q->disk->part0;
127
128	part_stat_lock();
129	part_stat_inc(part, ios[STAT_FLUSH]);
130	part_stat_add(part, nsecs[STAT_FLUSH],
131		      blk_time_get_ns() - rq->start_time_ns);
132	part_stat_unlock();
133}
134
135/**
136 * blk_flush_complete_seq - complete flush sequence
137 * @rq: PREFLUSH/FUA request being sequenced
138 * @fq: flush queue
139 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
140 * @error: whether an error occurred
141 *
142 * @rq just completed @seq part of its flush sequence; record the
143 * completion and trigger the next step.
144 *
145 * CONTEXT:
146 * spin_lock_irq(fq->mq_flush_lock)
147 */
148static void blk_flush_complete_seq(struct request *rq,
149				   struct blk_flush_queue *fq,
150				   unsigned int seq, blk_status_t error)
151{
152	struct request_queue *q = rq->q;
153	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
154	blk_opf_t cmd_flags;
155
156	BUG_ON(rq->flush.seq & seq);
157	rq->flush.seq |= seq;
158	cmd_flags = rq->cmd_flags;
159
160	if (likely(!error))
161		seq = blk_flush_cur_seq(rq);
162	else
163		seq = REQ_FSEQ_DONE;
164
165	switch (seq) {
166	case REQ_FSEQ_PREFLUSH:
167	case REQ_FSEQ_POSTFLUSH:
168		/* queue for flush */
169		if (list_empty(pending))
170			fq->flush_pending_since = jiffies;
171		list_add_tail(&rq->queuelist, pending);
172		break;
173
174	case REQ_FSEQ_DATA:
175		fq->flush_data_in_flight++;
176		spin_lock(&q->requeue_lock);
177		list_move(&rq->queuelist, &q->requeue_list);
178		spin_unlock(&q->requeue_lock);
179		blk_mq_kick_requeue_list(q);
180		break;
181
182	case REQ_FSEQ_DONE:
183		/*
184		 * @rq was previously adjusted by blk_insert_flush() for
185		 * flush sequencing and may already have gone through the
186		 * flush data request completion path.  Restore @rq for
187		 * normal completion and end it.
188		 */
189		list_del_init(&rq->queuelist);
190		blk_flush_restore_request(rq);
191		blk_mq_end_request(rq, error);
192		break;
193
194	default:
195		BUG();
196	}
197
198	blk_kick_flush(q, fq, cmd_flags);
199}
200
201static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
202				       blk_status_t error)
203{
204	struct request_queue *q = flush_rq->q;
205	struct list_head *running;
206	struct request *rq, *n;
207	unsigned long flags = 0;
208	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
209
210	/* release the tag's ownership to the req cloned from */
211	spin_lock_irqsave(&fq->mq_flush_lock, flags);
212
213	if (!req_ref_put_and_test(flush_rq)) {
214		fq->rq_status = error;
215		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
216		return RQ_END_IO_NONE;
217	}
218
219	blk_account_io_flush(flush_rq);
220	/*
221	 * The flush request has to be marked as IDLE when it is really
222	 * ended because its .end_io() is called from the timeout code path
223	 * too, to avoid use-after-free.
224	 */
225	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
226	if (fq->rq_status != BLK_STS_OK) {
227		error = fq->rq_status;
228		fq->rq_status = BLK_STS_OK;
229	}
230
231	if (!q->elevator) {
232		flush_rq->tag = BLK_MQ_NO_TAG;
233	} else {
234		blk_mq_put_driver_tag(flush_rq);
235		flush_rq->internal_tag = BLK_MQ_NO_TAG;
236	}
237
238	running = &fq->flush_queue[fq->flush_running_idx];
239	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
240
241	/* account completion of the flush request */
242	fq->flush_running_idx ^= 1;
243
244	/* and push the waiting requests to the next stage */
245	list_for_each_entry_safe(rq, n, running, queuelist) {
246		unsigned int seq = blk_flush_cur_seq(rq);
247
248		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
249		list_del_init(&rq->queuelist);
250		blk_flush_complete_seq(rq, fq, seq, error);
251	}
252
253	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
254	return RQ_END_IO_NONE;
255}
256
257bool is_flush_rq(struct request *rq)
258{
259	return rq->end_io == flush_end_io;
260}
261
262/**
263 * blk_kick_flush - consider issuing flush request
264 * @q: request_queue being kicked
265 * @fq: flush queue
266 * @flags: cmd_flags of the original request
267 *
268 * Flush related states of @q have changed, consider issuing flush request.
269 * Please read the comment at the top of this file for more info.
270 *
271 * CONTEXT:
272 * spin_lock_irq(fq->mq_flush_lock)
273 *
274 */
275static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
276			   blk_opf_t flags)
277{
278	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
279	struct request *first_rq =
280		list_first_entry(pending, struct request, queuelist);
281	struct request *flush_rq = fq->flush_rq;
282
283	/* C1 described at the top of this file */
284	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
285		return;
286
287	/* C2 and C3 */
288	if (fq->flush_data_in_flight &&
289	    time_before(jiffies,
290			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
291		return;
292
293	/*
294	 * Issue flush and toggle pending_idx.  This makes pending_idx
295	 * different from running_idx, which means flush is in flight.
296	 */
297	fq->flush_pending_idx ^= 1;
298
299	blk_rq_init(q, flush_rq);
300
301	/*
302	 * In case of the none scheduler, borrow the tag from the first
303	 * request, since the two can't be in flight at the same time, and
304	 * acquire the tag's ownership for the flush req.
305	 *
306	 * In case of an IO scheduler, the flush rq needs to borrow a
307	 * scheduler tag just to keep the put/get driver tag calls paired.
308	 */
309	flush_rq->mq_ctx = first_rq->mq_ctx;
310	flush_rq->mq_hctx = first_rq->mq_hctx;
311
312	if (!q->elevator)
313		flush_rq->tag = first_rq->tag;
314	else
315		flush_rq->internal_tag = first_rq->internal_tag;
316
317	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
318	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
319	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
320	flush_rq->end_io = flush_end_io;
321	/*
322	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
323	 * implied in refcount_inc_not_zero() called from
324	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
325	 * and READ flush_rq->end_io
326	 */
327	smp_wmb();
328	req_ref_set(flush_rq, 1);
329
330	spin_lock(&q->requeue_lock);
331	list_add_tail(&flush_rq->queuelist, &q->flush_list);
332	spin_unlock(&q->requeue_lock);
333
334	blk_mq_kick_requeue_list(q);
335}
336
337static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
338					       blk_status_t error)
339{
340	struct request_queue *q = rq->q;
341	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
342	struct blk_mq_ctx *ctx = rq->mq_ctx;
343	unsigned long flags;
344	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
345
346	if (q->elevator) {
347		WARN_ON(rq->tag < 0);
348		blk_mq_put_driver_tag(rq);
349	}
350
351	/*
352	 * After populating an empty queue, kick it to avoid stall.  Read
353	 * the comment in flush_end_io().
354	 */
355	spin_lock_irqsave(&fq->mq_flush_lock, flags);
356	fq->flush_data_in_flight--;
357	/*
358	 * rq->queuelist may have been corrupted by rq->rq_next reuse;
359	 * re-initialize it before reusing it here.
360	 */
361	INIT_LIST_HEAD(&rq->queuelist);
362	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
363	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
364
365	blk_mq_sched_restart(hctx);
366	return RQ_END_IO_NONE;
367}
368
369static void blk_rq_init_flush(struct request *rq)
370{
371	rq->flush.seq = 0;
372	rq->rq_flags |= RQF_FLUSH_SEQ;
373	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
374	rq->end_io = mq_flush_data_end_io;
375}
376
377/*
378 * Insert a PREFLUSH/FUA request into the flush state machine.
379 * Returns true if the request has been consumed by the flush state machine,
380 * or false if the caller should continue to process it.
381 */
382bool blk_insert_flush(struct request *rq)
383{
384	struct request_queue *q = rq->q;
385	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
386	bool supports_fua = q->limits.features & BLK_FEAT_FUA;
387	unsigned int policy = 0;
388
389	/* FLUSH/FUA request must never be merged */
390	WARN_ON_ONCE(rq->bio != rq->biotail);
391
392	if (blk_rq_sectors(rq))
393		policy |= REQ_FSEQ_DATA;
394
395	/*
396	 * Check which flushes we need to sequence for this operation.
397	 */
398	if (blk_queue_write_cache(q)) {
399		if (rq->cmd_flags & REQ_PREFLUSH)
400			policy |= REQ_FSEQ_PREFLUSH;
401		if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
402			policy |= REQ_FSEQ_POSTFLUSH;
403	}
404
405	/*
406	 * @policy now records what operations need to be done.  Adjust
407	 * REQ_PREFLUSH and FUA for the driver.
408	 */
409	rq->cmd_flags &= ~REQ_PREFLUSH;
410	if (!supports_fua)
411		rq->cmd_flags &= ~REQ_FUA;
412
413	/*
414	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
415	 * of those flags, we have to set REQ_SYNC to avoid skewing
416	 * the request accounting.
417	 */
418	rq->cmd_flags |= REQ_SYNC;
419
420	switch (policy) {
421	case 0:
422		/*
423		 * An empty flush handed down from a stacking driver may
424		 * translate into nothing if the underlying device does not
425		 * advertise a write-back cache.  In this case, simply
426		 * complete the request.
427		 */
428		blk_mq_end_request(rq, 0);
429		return true;
430	case REQ_FSEQ_DATA:
431		/*
432		 * If there's data, but no flush is necessary, the request can
433		 * be processed directly without going through flush machinery.
434		 * Queue for normal execution.
435		 */
436		return false;
437	case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
438		/*
439		 * Initialize the flush fields and completion handler to trigger
440		 * the post flush, and then just pass the command on.
441		 */
442		blk_rq_init_flush(rq);
443		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
444		spin_lock_irq(&fq->mq_flush_lock);
445		fq->flush_data_in_flight++;
446		spin_unlock_irq(&fq->mq_flush_lock);
447		return false;
448	default:
449		/*
450		 * Mark the request as part of a flush sequence and submit it
451		 * for further processing to the flush state machine.
452		 */
453		blk_rq_init_flush(rq);
454		spin_lock_irq(&fq->mq_flush_lock);
455		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
456		spin_unlock_irq(&fq->mq_flush_lock);
457		return true;
458	}
459}
460
461/**
462 * blkdev_issue_flush - queue a flush
463 * @bdev:	blockdev to issue flush for
464 *
465 * Description:
466 *    Issue a flush for the block device in question.
467 */
468int blkdev_issue_flush(struct block_device *bdev)
469{
470	struct bio bio;
471
472	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
473	return submit_bio_wait(&bio);
474}
475EXPORT_SYMBOL(blkdev_issue_flush);
476
477struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
478					      gfp_t flags)
479{
480	struct blk_flush_queue *fq;
481	int rq_sz = sizeof(struct request);
482
483	fq = kzalloc_node(sizeof(*fq), flags, node);
484	if (!fq)
485		goto fail;
486
487	spin_lock_init(&fq->mq_flush_lock);
488
489	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
490	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
491	if (!fq->flush_rq)
492		goto fail_rq;
493
494	INIT_LIST_HEAD(&fq->flush_queue[0]);
495	INIT_LIST_HEAD(&fq->flush_queue[1]);
496
497	return fq;
498
499 fail_rq:
500	kfree(fq);
501 fail:
502	return NULL;
503}
504
505void blk_free_flush_queue(struct blk_flush_queue *fq)
506{
507	/* bio-based request queues don't have a flush queue */
508	if (!fq)
509		return;
510
511	kfree(fq->flush_rq);
512	kfree(fq);
513}
514
515/*
516 * Allow a driver to set its own lock class on fq->mq_flush_lock to
517 * avoid a lockdep complaint.
518 *
519 * flush_end_io() may be called recursively from some drivers, such as
520 * nvme-loop, so lockdep may complain about 'possible recursive locking'
521 * because all 'struct blk_flush_queue' instances share the same
522 * mq_flush_lock lock class key.  Assign a different lock class to these
523 * drivers' fq->mq_flush_lock to avoid the lockdep warning.
524 *
525 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
526 * instance would be overkill, and worse, it introduces a horrible boot
527 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
528 * which is called for each hctx release.  SCSI probing may synchronously
529 * create and destroy lots of MQ request_queues for non-existent devices,
530 * and some robot test kernels always enable the lockdep option.  With a
531 * per-fq lock class, SCSI MQ probe has been observed to take over half an hour.
532 */
533void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
534		struct lock_class_key *key)
535{
536	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
537}
538EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
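
Between the two versions, blk_flush_policy() was folded into blk_insert_flush(), which now dispatches on the computed policy in a four-way switch. The sketch below models that dispatch as an illustrative userspace program; the names and values are stand-ins rather than the kernel's own, but the case analysis follows blk_insert_flush() in the listing above.

#include <stdbool.h>
#include <stdio.h>

#define REQ_PREFLUSH       (1 << 0)	/* stand-in values */
#define REQ_FUA            (1 << 1)

#define REQ_FSEQ_PREFLUSH  (1 << 0)
#define REQ_FSEQ_DATA      (1 << 1)
#define REQ_FSEQ_POSTFLUSH (1 << 2)

/* Policy computation as in v6.13's blk_insert_flush(); the device is
 * now described by (write_cache, supports_fua) rather than flush_flags. */
static unsigned int flush_policy(bool write_cache, bool supports_fua,
				 unsigned int cmd_flags, bool has_data)
{
	unsigned int policy = 0;

	if (has_data)
		policy |= REQ_FSEQ_DATA;
	if (write_cache) {
		if (cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if ((cmd_flags & REQ_FUA) && !supports_fua)
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

/* The four outcomes of the switch (policy) in blk_insert_flush() */
static const char *dispatch(unsigned int policy)
{
	switch (policy) {
	case 0:
		/* no write-back cache and no data: complete immediately */
		return "end request immediately";
	case REQ_FSEQ_DATA:
		/* data but no flushes: bypass the flush machinery */
		return "queue for normal execution";
	case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
		/* pass the write on; its completion triggers the POSTFLUSH */
		return "pass through, POSTFLUSH on completion";
	default:
		/* a PREFLUSH must run first: enter the flush state machine */
		return "enter the flush state machine";
	}
}

int main(void)
{
	/* FUA write with data on a write-back-cache device with native FUA:
	 * no PREFLUSH was requested and the hardware handles FUA itself */
	unsigned int p = flush_policy(true, true, REQ_FUA, true);

	printf("%s\n", dispatch(p));	/* queue for normal execution */
	return 0;
}

Note also how blkdev_issue_flush() shrank between the versions: a v3.5.6 caller wrote something like ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL); and waited on a completion, while a v6.13.7 caller writes ret = blkdev_issue_flush(bdev); and the flush bio is built on the stack and driven by submit_bio_wait().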