v3.5.6
 
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > queue_max_segment_size(q))
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
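
A driver typically sizes its scatterlist from queue_max_segments() and maps each request just before issuing it. A minimal sketch of that calling pattern (the device structure and function names here are hypothetical, not from this file):

/* hypothetical per-device state; sg_table holds queue_max_segments(q) entries */
struct my_dev {
	struct request_queue *q;
	struct scatterlist *sg_table;
};

static int my_prep_rq(struct my_dev *dev, struct request *rq)
{
	/* the table must hold at least rq->nr_phys_segments entries */
	int nsegs = blk_rq_map_sg(dev->q, rq, dev->sg_table);

	if (!nsegs)
		return -EIO;

	/* hand dev->sg_table[0..nsegs-1] to the DMA engine here */
	return 0;
}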

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request; they can't be merged if so.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return false;

	/* don't merge file system requests and discard requests */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return false;

	/* don't merge discard requests and secure discard requests */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
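
The two comparisons in blk_try_merge() are plain sector arithmetic. A concrete case with made-up numbers:

/*
 * A request at sector 100 spanning 8 sectors covers 100..107, so
 * blk_rq_pos(rq) + blk_rq_sectors(rq) == 108.
 *
 *  - a bio starting at sector 108 extends the tail   -> ELEVATOR_BACK_MERGE
 *  - an 8-sector bio starting at sector 92 ends right at 100, i.e.
 *    blk_rq_pos(rq) - bio_sectors(bio) == bi_sector  -> ELEVATOR_FRONT_MERGE
 *  - anything else                                   -> ELEVATOR_NO_MERGE
 */
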
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit.  We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
 * logical block size, which is the minimum unit accepted by hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
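
The cap is easiest to see with concrete block sizes (illustrative arithmetic, not from the source):

/*
 * logical_block_size = 512:  round_down(UINT_MAX, 512)  = 4294966784,
 *                            >> SECTOR_SHIFT            = 8388607 sectors
 * logical_block_size = 4096: round_down(UINT_MAX, 4096) = 4294963200,
 *                            >> SECTOR_SHIFT            = 8388600 sectors
 */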
 
static struct bio *bio_split_discard(struct bio *bio,
				     const struct queue_limits *lim,
				     unsigned *nsegs, struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		return NULL;

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *bio_split_write_zeroes(struct bio *bio,
					  const struct queue_limits *lim,
					  unsigned *nsegs, struct bio_set *bs)
{
	*nsegs = 0;
	if (!lim->max_write_zeroes_sectors)
		return NULL;
	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
		return NULL;
	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
				       const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	unsigned max_sectors = lim->max_sectors, start, end;

	if (lim->chunk_sectors) {
		max_sectors = min(max_sectors,
			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
					       lim->chunk_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}
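
A worked example of the alignment logic, with made-up limits:

/*
 * physical_block_size = 4 KiB (pbs = 8 sectors), max_sectors = 1280,
 * and a bio starting at sector 7:
 *
 *   start = 7 & (8 - 1)           = 7
 *   end   = (7 + 1280) & ~(8 - 1) = 1280
 *
 * end > start, so 1273 sectors are allowed and the resulting request
 * ends on a physical block boundary despite its misaligned start.
 */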

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @start_page: See below.
 * @offset: Offset from @start_page where to add a segment.
 *
 * Returns the maximum number of bytes that can be added as a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		struct page *start_page, unsigned long offset)
{
	unsigned long mask = lim->seg_boundary_mask;

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
}

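A concrete evaluation, with made-up limits:

/*
 * seg_boundary_mask = 0xffff (64 KiB) and max_segment_size = 65536.
 * For a page at physical address 0x1234f000 with offset 0:
 *
 *   offset = 0xffff & 0x1234f000 = 0xf000
 *   min(0xffff - 0xf000, 65535) + 1 = 0x0fff + 1 = 0x1000
 *
 * so at most 4096 bytes fit before the 64 KiB boundary would be crossed.
 */
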
/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:      [in] queue limits to split based on
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 *            by the number of bytes from @bv that may be appended to that
 *            bio without exceeding @max_bytes
 * @max_segs: [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * bio_split_rw - split a bio in two bios
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @bs:	  [in] bio set to allocate the clone from
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most @max_bytes worth of data
 * - That it has at most @lim->max_segments segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	/*
	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
	 * with EAGAIN if splitting is required and return an error pointer.
	 */
	if (bio->bi_opf & REQ_NOWAIT) {
		bio->bi_status = BLK_STS_AGAIN;
		bio_endio(bio);
		return ERR_PTR(-EAGAIN);
	}

	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);

	/*
	 * Bio splitting may cause subtle trouble such as hang when doing sync
	 * iopoll in direct IO routine. Since the performance gain of iopoll
	 * for big IO can be trivial, disable iopoll when a split is needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
}
EXPORT_SYMBOL_GPL(bio_split_rw);

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from the disk's ->bio_split bio set, which is
 * provided by the block layer.
 */
struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs)
{
	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
	struct bio *split;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = bio_split_discard(bio, lim, nr_segs, bs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
		break;
	default:
		split = bio_split_rw(bio, lim, nr_segs, bs,
				get_max_io_size(bio, lim) << SECTOR_SHIFT);
		if (IS_ERR(split))
			return NULL;
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
		return split;
	}
	return bio;
}

/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it.  @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from the disk's ->bio_split bio set, which is
 * provided by the block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	if (bio_may_exceed_limits(bio, lim))
		return __bio_split_to_limits(bio, lim, &nr_segs);
	return bio;
}
EXPORT_SYMBOL(bio_split_to_limits);
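
Stacking and bio-based drivers call this from their ->submit_bio handler before inspecting the bio; a minimal sketch (driver name hypothetical):

static void my_submit_bio(struct bio *bio)
{
	/* split off an in-limits piece; the remainder is resubmitted */
	bio = bio_split_to_limits(bio);
	if (!bio)	/* already ended, e.g. a REQ_NOWAIT split failure */
		return;

	/* the bio now fits the queue limits; process it here */
}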

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(&q->limits,
				   bvec->bv_page, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine.  For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must be wrong if the computed number of segments is
	 * bigger than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
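
Most callers use the two-argument wrapper from include/linux/blk-mq.h, which supplies the last_sg cursor itself; it is roughly this (sketch, see the header for the exact definition):

static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}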

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static inline blk_opf_t bio_failfast(const struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;

	return bio->bi_opf & REQ_FAILFAST_MASK;
}

/*
 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
 * as failfast, and request's failfast has to be updated in case of
 * front merge.
 */
static inline void blk_update_mixed_merge(struct request *req,
		struct bio *bio, bool front_merge)
{
	if (req->rq_flags & RQF_MIXED_MERGE) {
		if (bio->bi_opf & REQ_RAHEAD)
			bio->bi_opf |= REQ_FAILFAST_MASK;

		if (front_merge) {
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
		}
	}
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, false);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, true);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug;
	struct request *rq;

	plug = blk_mq_plug(bio);
	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating plug list for merges if we have multiple
		 * queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
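
An I/O scheduler's ->bio_merge hook is the typical caller; a sketch modeled on mq-deadline (the scheduler data structure and lock are hypothetical placeholders):

static bool my_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct my_sched_data *sd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&sd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&sd->lock);

	/* a fully merged-away request is handed back for the caller to free */
	if (free)
		blk_mq_free_request(free);

	return ret;
}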