1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Functions related to segment and merge handling
4 */
5#include <linux/kernel.h>
6#include <linux/module.h>
7#include <linux/bio.h>
8#include <linux/blkdev.h>
9#include <linux/blk-integrity.h>
10#include <linux/scatterlist.h>
11#include <linux/part_stat.h>
12#include <linux/blk-cgroup.h>
13
14#include <trace/events/block.h>
15
16#include "blk.h"
17#include "blk-mq-sched.h"
18#include "blk-rq-qos.h"
19#include "blk-throttle.h"
20
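/*
 * Return the (multi-page) bvec at the current position of the bio's iterator,
 * i.e. the first bvec that submitting the bio would transfer.
 */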
21static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22{
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
24}
25
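/*
 * Return the last (multi-page) bvec covered by the bio's iterator.  If the
 * bio ends in the middle of a bvec, the returned length is trimmed to the
 * part that actually belongs to the bio.
 */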
26static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
27{
28 struct bvec_iter iter = bio->bi_iter;
29 int idx;
30
31 bio_get_first_bvec(bio, bv);
32 if (bv->bv_len == bio->bi_iter.bi_size)
33 return; /* this bio only has a single bvec */
34
35 bio_advance_iter(bio, &iter, iter.bi_size);
36
37 if (!iter.bi_bvec_done)
38 idx = iter.bi_idx - 1;
39 else /* in the middle of bvec */
40 idx = iter.bi_idx;
41
42 *bv = bio->bi_io_vec[idx];
43
44 /*
45 * iter.bi_bvec_done records actual length of the last bvec
46 * if this bio ends in the middle of one io vector
47 */
48 if (iter.bi_bvec_done)
49 bv->bv_len = iter.bi_bvec_done;
50}
51
52static inline bool bio_will_gap(struct request_queue *q,
53 struct request *prev_rq, struct bio *prev, struct bio *next)
54{
55 struct bio_vec pb, nb;
56
57 if (!bio_has_data(prev) || !queue_virt_boundary(q))
58 return false;
59
60 /*
61 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
62 * is quite difficult to respect the sg gap limit. We work hard to
63 * merge a huge number of small single bios in case of mkfs.
64 */
65 if (prev_rq)
66 bio_get_first_bvec(prev_rq->bio, &pb);
67 else
68 bio_get_first_bvec(prev, &pb);
69 if (pb.bv_offset & queue_virt_boundary(q))
70 return true;
71
72 /*
73 * We don't need to worry about the situation that the merged segment
74 * ends in unaligned virt boundary:
75 *
76 * - if 'pb' ends aligned, the merged segment ends aligned
77 * - if 'pb' ends unaligned, the next bio must include
78 * one single bvec of 'nb', otherwise the 'nb' can't
79 * merge with 'pb'
80 */
81 bio_get_last_bvec(prev, &pb);
82 bio_get_first_bvec(next, &nb);
83 if (biovec_phys_mergeable(q, &pb, &nb))
84 return false;
85 return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
86}
87
88static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
89{
90 return bio_will_gap(req->q, req, req->biotail, bio);
91}
92
93static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
94{
95 return bio_will_gap(req->q, NULL, bio, req->bio);
96}
97
98/*
99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
100 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
101 * logical block size, which is the minimum accepted unit by hardware.
102 */
103static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
104{
105 return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
106}
107
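/*
 * Split a discard or secure erase bio so that the front part honours the
 * queue's max_discard_sectors limit and the remainder starts on a properly
 * aligned, granularity-sized boundary.  Returns NULL when no split is needed.
 */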
108static struct bio *bio_split_discard(struct bio *bio,
109 const struct queue_limits *lim,
110 unsigned *nsegs, struct bio_set *bs)
111{
112 unsigned int max_discard_sectors, granularity;
113 sector_t tmp;
114 unsigned split_sectors;
115
116 *nsegs = 1;
117
118 /* Zero-sector (unknown) and one-sector granularities are the same. */
119 granularity = max(lim->discard_granularity >> 9, 1U);
120
121 max_discard_sectors =
122 min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
123 max_discard_sectors -= max_discard_sectors % granularity;
124
125 if (unlikely(!max_discard_sectors)) {
126 /* XXX: warn */
127 return NULL;
128 }
129
130 if (bio_sectors(bio) <= max_discard_sectors)
131 return NULL;
132
133 split_sectors = max_discard_sectors;
134
135 /*
136 * If the next starting sector would be misaligned, stop the discard at
137 * the previous aligned sector.
138 */
139 tmp = bio->bi_iter.bi_sector + split_sectors -
140 ((lim->discard_alignment >> 9) % granularity);
141 tmp = sector_div(tmp, granularity);
142
143 if (split_sectors > tmp)
144 split_sectors -= tmp;
145
146 return bio_split(bio, split_sectors, GFP_NOIO, bs);
147}
148
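/*
 * Split a write-zeroes bio at the queue's max_write_zeroes_sectors limit.
 * Returns NULL when the bio already fits or the limit is not set.
 */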
149static struct bio *bio_split_write_zeroes(struct bio *bio,
150 const struct queue_limits *lim,
151 unsigned *nsegs, struct bio_set *bs)
152{
153 *nsegs = 0;
154 if (!lim->max_write_zeroes_sectors)
155 return NULL;
156 if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
157 return NULL;
158 return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
159}
160
161/*
162 * Return the maximum number of sectors from the start of a bio that may be
163 * submitted as a single request to a block device. If enough sectors remain,
164 * align the end to the physical block size. Otherwise align the end to the
165 * logical block size. This approach minimizes the number of non-aligned
166 * requests that are submitted to a block device if the start of a bio is not
167 * aligned to a physical block boundary.
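 *
 * For example, with a 4096-byte physical block size (pbs = 8 sectors) and a
 * bio starting at sector 3, the returned length is chosen so that the split
 * boundary falls on a physical block boundary whenever max_sectors allows it.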
168 */
169static inline unsigned get_max_io_size(struct bio *bio,
170 const struct queue_limits *lim)
171{
172 unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
173 unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
174 unsigned max_sectors = lim->max_sectors, start, end;
175
176 if (lim->chunk_sectors) {
177 max_sectors = min(max_sectors,
178 blk_chunk_sectors_left(bio->bi_iter.bi_sector,
179 lim->chunk_sectors));
180 }
181
182 start = bio->bi_iter.bi_sector & (pbs - 1);
183 end = (start + max_sectors) & ~(pbs - 1);
184 if (end > start)
185 return end - start;
186 return max_sectors & ~(lbs - 1);
187}
188
189/**
190 * get_max_segment_size() - maximum number of bytes to add as a single segment
191 * @lim: Request queue limits.
192 * @start_page: See below.
193 * @offset: Offset from @start_page at which to add a segment.
194 *
195 * Returns the maximum number of bytes that can be added as a single segment.
196 */
197static inline unsigned get_max_segment_size(const struct queue_limits *lim,
198 struct page *start_page, unsigned long offset)
199{
200 unsigned long mask = lim->seg_boundary_mask;
201
202 offset = mask & (page_to_phys(start_page) + offset);
203
204 /*
205 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
206 * after having calculated the minimum.
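 * E.g. with mask = ULONG_MAX, offset = 0 and max_segment_size = 65536 this
 * evaluates to min(ULONG_MAX, 65535) + 1 = 65536 without any intermediate
 * overflow.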
207 */
208 return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
209}
210
211/**
212 * bvec_split_segs - verify whether or not a bvec should be split in the middle
213 * @lim: [in] queue limits to split based on
214 * @bv: [in] bvec to examine
215 * @nsegs: [in,out] Number of segments in the bio being built. Incremented
216 * by the number of segments from @bv that may be appended to that
217 * bio without exceeding @max_segs
218 * @bytes: [in,out] Number of bytes in the bio being built. Incremented
219 * by the number of bytes from @bv that may be appended to that
220 * bio without exceeding @max_bytes
221 * @max_segs: [in] upper bound for *@nsegs
222 * @max_bytes: [in] upper bound for *@bytes
223 *
224 * When splitting a bio, it can happen that a bvec is encountered that is too
225 * big to fit in a single segment and hence that it has to be split in the
226 * middle. This function verifies whether or not that should happen. The value
227 * %true is returned if and only if appending the entire @bv to a bio with
228 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
229 * the block driver.
230 */
231static bool bvec_split_segs(const struct queue_limits *lim,
232 const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
233 unsigned max_segs, unsigned max_bytes)
234{
235 unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
236 unsigned len = min(bv->bv_len, max_len);
237 unsigned total_len = 0;
238 unsigned seg_size = 0;
239
240 while (len && *nsegs < max_segs) {
241 seg_size = get_max_segment_size(lim, bv->bv_page,
242 bv->bv_offset + total_len);
243 seg_size = min(seg_size, len);
244
245 (*nsegs)++;
246 total_len += seg_size;
247 len -= seg_size;
248
249 if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
250 break;
251 }
252
253 *bytes += total_len;
254
255 /* tell the caller to split the bvec if it is too big to fit */
256 return len > 0 || bv->bv_len > max_len;
257}
258
259/**
260 * bio_split_rw - split a bio in two bios
261 * @bio: [in] bio to be split
262 * @lim: [in] queue limits to split based on
263 * @segs: [out] number of segments in the bio with the first half of the sectors
264 * @bs: [in] bio set to allocate the clone from
265 * @max_bytes: [in] maximum number of bytes per bio
266 *
267 * Clone @bio, update the bi_iter of the clone to represent the first sectors
268 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
269 * following is guaranteed for the cloned bio:
270 * - That it has at most @max_bytes worth of data
271 * - That it has at most queue_max_segments(@q) segments.
272 *
273 * Except for discard requests the cloned bio will point at the bi_io_vec of
274 * the original bio. It is the responsibility of the caller to ensure that the
275 * original bio is not freed before the cloned bio. The caller is also
276 * responsible for ensuring that @bs is only destroyed after processing of the
277 * split bio has finished.
278 */
279static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
280 unsigned *segs, struct bio_set *bs, unsigned max_bytes)
281{
282 struct bio_vec bv, bvprv, *bvprvp = NULL;
283 struct bvec_iter iter;
284 unsigned nsegs = 0, bytes = 0;
285
286 bio_for_each_bvec(bv, bio, iter) {
287 /*
288 * If the queue doesn't support SG gaps and adding this
289 * offset would create a gap, disallow it.
290 */
291 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
292 goto split;
293
294 if (nsegs < lim->max_segments &&
295 bytes + bv.bv_len <= max_bytes &&
296 bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
297 nsegs++;
298 bytes += bv.bv_len;
299 } else {
300 if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
301 lim->max_segments, max_bytes))
302 goto split;
303 }
304
305 bvprv = bv;
306 bvprvp = &bvprv;
307 }
308
309 *segs = nsegs;
310 return NULL;
311split:
312 /*
313 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
314 * with EAGAIN if splitting is required and return an error pointer.
315 */
316 if (bio->bi_opf & REQ_NOWAIT) {
317 bio->bi_status = BLK_STS_AGAIN;
318 bio_endio(bio);
319 return ERR_PTR(-EAGAIN);
320 }
321
322 *segs = nsegs;
323
324 /*
325 * Individual bvecs might not be logical block aligned. Round down the
326 * split size so that each bio is properly block size aligned, even if
327 * we do not use the full hardware limits.
328 */
329 bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
330
331 /*
332 * Bio splitting may cause subtle trouble such as hang when doing sync
333 * iopoll in direct IO routine. Given that the performance gain of iopoll
334 * for big IO can be trivial, disable iopoll when a split is needed.
335 */
336 bio_clear_polled(bio);
337 return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
338}
339
340/**
341 * __bio_split_to_limits - split a bio to fit the queue limits
342 * @bio: bio to be split
343 * @lim: queue limits to split based on
344 * @nr_segs: returns the number of segments in the returned bio
345 *
346 * Check if @bio needs splitting based on the queue limits, and if so split off
347 * a bio fitting the limits from the beginning of @bio and return it. @bio is
348 * shortened to the remainder and re-submitted.
349 *
350 * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which
351 * is provided by the block layer.
352 */
353struct bio *__bio_split_to_limits(struct bio *bio,
354 const struct queue_limits *lim,
355 unsigned int *nr_segs)
356{
357 struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
358 struct bio *split;
359
360 switch (bio_op(bio)) {
361 case REQ_OP_DISCARD:
362 case REQ_OP_SECURE_ERASE:
363 split = bio_split_discard(bio, lim, nr_segs, bs);
364 break;
365 case REQ_OP_WRITE_ZEROES:
366 split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
367 break;
368 default:
369 split = bio_split_rw(bio, lim, nr_segs, bs,
370 get_max_io_size(bio, lim) << SECTOR_SHIFT);
371 if (IS_ERR(split))
372 return NULL;
373 break;
374 }
375
376 if (split) {
377 /* there is no chance to merge the split bio */
378 split->bi_opf |= REQ_NOMERGE;
379
380 blkcg_bio_issue_init(split);
381 bio_chain(split, bio);
382 trace_block_split(split, bio->bi_iter.bi_sector);
383 submit_bio_noacct(bio);
384 return split;
385 }
386 return bio;
387}
388
389/**
390 * bio_split_to_limits - split a bio to fit the queue limits
391 * @bio: bio to be split
392 *
393 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
394 * if so split off a bio fitting the limits from the beginning of @bio and
395 * return it. @bio is shortened to the remainder and re-submitted.
396 *
397 * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which
398 * is provided by the block layer.
399 */
400struct bio *bio_split_to_limits(struct bio *bio)
401{
402 const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
403 unsigned int nr_segs;
404
405 if (bio_may_exceed_limits(bio, lim))
406 return __bio_split_to_limits(bio, lim, &nr_segs);
407 return bio;
408}
409EXPORT_SYMBOL(bio_split_to_limits);
410
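/*
 * Recalculate the number of physical segments of a request by walking its
 * bvecs with the same splitting rules that were applied when the bios were
 * first queued.
 */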
411unsigned int blk_recalc_rq_segments(struct request *rq)
412{
413 unsigned int nr_phys_segs = 0;
414 unsigned int bytes = 0;
415 struct req_iterator iter;
416 struct bio_vec bv;
417
418 if (!rq->bio)
419 return 0;
420
421 switch (bio_op(rq->bio)) {
422 case REQ_OP_DISCARD:
423 case REQ_OP_SECURE_ERASE:
424 if (queue_max_discard_segments(rq->q) > 1) {
425 struct bio *bio = rq->bio;
426
427 for_each_bio(bio)
428 nr_phys_segs++;
429 return nr_phys_segs;
430 }
431 return 1;
432 case REQ_OP_WRITE_ZEROES:
433 return 0;
434 default:
435 break;
436 }
437
438 rq_for_each_bvec(bv, rq, iter)
439 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
440 UINT_MAX, UINT_MAX);
441 return nr_phys_segs;
442}
443
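/*
 * Return the scatterlist entry to fill next: @sglist itself for the first
 * segment, otherwise the entry following *@sg with its termination bit
 * cleared.
 */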
444static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
445 struct scatterlist *sglist)
446{
447 if (!*sg)
448 return sglist;
449
450 /*
451 * If the driver previously mapped a shorter list, we could see a
452 * termination bit prematurely unless it fully inits the sg table
453 * on each mapping. We KNOW that there must be more entries here
454 * or the driver would be buggy, so force clear the termination bit
455 * to avoid doing a full sg_init_table() in drivers for each command.
456 */
457 sg_unmark_end(*sg);
458 return sg_next(*sg);
459}
460
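/*
 * Map a single (possibly multi-page) bvec to one or more scatterlist entries
 * while honouring the queue's segment size and boundary limits.  Returns the
 * number of entries used.
 */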
461static unsigned blk_bvec_map_sg(struct request_queue *q,
462 struct bio_vec *bvec, struct scatterlist *sglist,
463 struct scatterlist **sg)
464{
465 unsigned nbytes = bvec->bv_len;
466 unsigned nsegs = 0, total = 0;
467
468 while (nbytes > 0) {
469 unsigned offset = bvec->bv_offset + total;
470 unsigned len = min(get_max_segment_size(&q->limits,
471 bvec->bv_page, offset), nbytes);
472 struct page *page = bvec->bv_page;
473
474 /*
475 * Unfortunately a fair number of drivers barf on scatterlists
476 * that have an offset larger than PAGE_SIZE, despite other
477 * subsystems dealing with that invariant just fine. For now
478 * stick to the legacy format where we never present those from
479 * the block layer, but the code below should be removed once
480 * these offenders (mostly MMC/SD drivers) are fixed.
481 */
482 page += (offset >> PAGE_SHIFT);
483 offset &= ~PAGE_MASK;
484
485 *sg = blk_next_sg(sg, sglist);
486 sg_set_page(*sg, page, len, offset);
487
488 total += len;
489 nbytes -= len;
490 nsegs++;
491 }
492
493 return nsegs;
494}
495
496static inline int __blk_bvec_map_sg(struct bio_vec bv,
497 struct scatterlist *sglist, struct scatterlist **sg)
498{
499 *sg = blk_next_sg(sg, sglist);
500 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
501 return 1;
502}
503
504/* only try to merge bvecs into one sg if they are from two bios */
505static inline bool
506__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
507 struct bio_vec *bvprv, struct scatterlist **sg)
508{
509
510 int nbytes = bvec->bv_len;
511
512 if (!*sg)
513 return false;
514
515 if ((*sg)->length + nbytes > queue_max_segment_size(q))
516 return false;
517
518 if (!biovec_phys_mergeable(q, bvprv, bvec))
519 return false;
520
521 (*sg)->length += nbytes;
522
523 return true;
524}
525
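/*
 * Map all bvecs of a bio chain to a scatterlist, merging physically
 * contiguous bvecs of adjacent bios into one entry where the limits allow.
 */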
526static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
527 struct scatterlist *sglist,
528 struct scatterlist **sg)
529{
530 struct bio_vec bvec, bvprv = { NULL };
531 struct bvec_iter iter;
532 int nsegs = 0;
533 bool new_bio = false;
534
535 for_each_bio(bio) {
536 bio_for_each_bvec(bvec, bio, iter) {
537 /*
538 * Only try to merge bvecs from two different bios, since
539 * bvecs within one bio were already merged when its pages
540 * were added
541 */
542 if (new_bio &&
543 __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
544 goto next_bvec;
545
546 if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
547 nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
548 else
549 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
550 next_bvec:
551 new_bio = false;
552 }
553 if (likely(bio->bi_iter.bi_size)) {
554 bvprv = bvec;
555 new_bio = true;
556 }
557 }
558
559 return nsegs;
560}
561
562/*
563 * map a request to scatterlist, return number of sg entries setup. Caller
564 * must make sure sg can hold rq->nr_phys_segments entries
565 */
566int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
567 struct scatterlist *sglist, struct scatterlist **last_sg)
568{
569 int nsegs = 0;
570
571 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
572 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
573 else if (rq->bio)
574 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
575
576 if (*last_sg)
577 sg_mark_end(*last_sg);
578
579 /*
580 * Something must have gone wrong if the calculated number of
581 * segments is bigger than the number of the request's physical segments
582 */
583 WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
584
585 return nsegs;
586}
587EXPORT_SYMBOL(__blk_rq_map_sg);
588
589static inline unsigned int blk_rq_get_max_segments(struct request *rq)
590{
591 if (req_op(rq) == REQ_OP_DISCARD)
592 return queue_max_discard_segments(rq->q);
593 return queue_max_segments(rq->q);
594}
595
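/*
 * Maximum size in sectors of a request starting at @offset.  Passthrough
 * requests are only bound by max_hw_sectors; other requests are additionally
 * capped so that they do not cross a chunk_sectors boundary, except for
 * discards and secure erases.
 */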
596static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
597 sector_t offset)
598{
599 struct request_queue *q = rq->q;
600 unsigned int max_sectors;
601
602 if (blk_rq_is_passthrough(rq))
603 return q->limits.max_hw_sectors;
604
605 max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
606 if (!q->limits.chunk_sectors ||
607 req_op(rq) == REQ_OP_DISCARD ||
608 req_op(rq) == REQ_OP_SECURE_ERASE)
609 return max_sectors;
610 return min(max_sectors,
611 blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
612}
613
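/*
 * Account the segments that @bio adds to @req and check them against the
 * request's segment limit.  Returns 1 if the merge may proceed, 0 otherwise
 * (in which case the request is marked as no longer mergeable).
 */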
614static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
615 unsigned int nr_phys_segs)
616{
617 if (!blk_cgroup_mergeable(req, bio))
618 goto no_merge;
619
620 if (blk_integrity_merge_bio(req->q, req, bio) == false)
621 goto no_merge;
622
623 /* discard request merge won't add new segment */
624 if (req_op(req) == REQ_OP_DISCARD)
625 return 1;
626
627 if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
628 goto no_merge;
629
630 /*
631 * This will form the start of a new hw segment. Bump the
632 * segment counter.
633 */
634 req->nr_phys_segments += nr_phys_segs;
635 return 1;
636
637no_merge:
638 req_set_nomerge(req->q, req);
639 return 0;
640}
641
642int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
643{
644 if (req_gap_back_merge(req, bio))
645 return 0;
646 if (blk_integrity_rq(req) &&
647 integrity_req_gap_back_merge(req, bio))
648 return 0;
649 if (!bio_crypt_ctx_back_mergeable(req, bio))
650 return 0;
651 if (blk_rq_sectors(req) + bio_sectors(bio) >
652 blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
653 req_set_nomerge(req->q, req);
654 return 0;
655 }
656
657 return ll_new_hw_segment(req, bio, nr_segs);
658}
659
660static int ll_front_merge_fn(struct request *req, struct bio *bio,
661 unsigned int nr_segs)
662{
663 if (req_gap_front_merge(req, bio))
664 return 0;
665 if (blk_integrity_rq(req) &&
666 integrity_req_gap_front_merge(req, bio))
667 return 0;
668 if (!bio_crypt_ctx_front_mergeable(req, bio))
669 return 0;
670 if (blk_rq_sectors(req) + bio_sectors(bio) >
671 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
672 req_set_nomerge(req->q, req);
673 return 0;
674 }
675
676 return ll_new_hw_segment(req, bio, nr_segs);
677}
678
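/*
 * Try to merge two discard requests on a queue that supports multi-range
 * discards; each bio in the resulting request counts as one discard range.
 */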
679static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
680 struct request *next)
681{
682 unsigned short segments = blk_rq_nr_discard_segments(req);
683
684 if (segments >= queue_max_discard_segments(q))
685 goto no_merge;
686 if (blk_rq_sectors(req) + bio_sectors(next->bio) >
687 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
688 goto no_merge;
689
690 req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
691 return true;
692no_merge:
693 req_set_nomerge(q, req);
694 return false;
695}
696
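/*
 * Check whether @req and @next can be merged back to back without violating
 * the queue's size, segment, cgroup, integrity or crypto constraints.
 */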
697static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
698 struct request *next)
699{
700 int total_phys_segments;
701
702 if (req_gap_back_merge(req, next->bio))
703 return 0;
704
705 /*
706 * Will it become too large?
707 */
708 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
709 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
710 return 0;
711
712 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
713 if (total_phys_segments > blk_rq_get_max_segments(req))
714 return 0;
715
716 if (!blk_cgroup_mergeable(req, next->bio))
717 return 0;
718
719 if (blk_integrity_merge_rq(q, req, next) == false)
720 return 0;
721
722 if (!bio_crypt_ctx_merge_rq(req, next))
723 return 0;
724
725 /* Merge is OK... */
726 req->nr_phys_segments = total_phys_segments;
727 return 1;
728}
729
730/**
731 * blk_rq_set_mixed_merge - mark a request as mixed merge
732 * @rq: request to mark as mixed merge
733 *
734 * Description:
735 * @rq is about to be mixed merged. Make sure the attributes
736 * which can be mixed are set in each bio and mark @rq as mixed
737 * merged.
738 */
739void blk_rq_set_mixed_merge(struct request *rq)
740{
741 blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
742 struct bio *bio;
743
744 if (rq->rq_flags & RQF_MIXED_MERGE)
745 return;
746
747 /*
748 * @rq will no longer represent mixable attributes for all the
749 * contained bios. It will just track those of the first one.
750 * Distribute the attributes to each bio.
751 */
752 for (bio = rq->bio; bio; bio = bio->bi_next) {
753 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
754 (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
755 bio->bi_opf |= ff;
756 }
757 rq->rq_flags |= RQF_MIXED_MERGE;
758}
759
760static void blk_account_io_merge_request(struct request *req)
761{
762 if (blk_do_io_stat(req)) {
763 part_stat_lock();
764 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
765 part_stat_unlock();
766 }
767}
768
769static enum elv_merge blk_try_req_merge(struct request *req,
770 struct request *next)
771{
772 if (blk_discard_mergable(req))
773 return ELEVATOR_DISCARD_MERGE;
774 else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
775 return ELEVATOR_BACK_MERGE;
776
777 return ELEVATOR_NO_MERGE;
778}
779
780/*
781 * For non-mq, this has to be called with the request spinlock acquired.
782 * For mq with scheduling, the appropriate queue wide lock should be held.
783 */
784static struct request *attempt_merge(struct request_queue *q,
785 struct request *req, struct request *next)
786{
787 if (!rq_mergeable(req) || !rq_mergeable(next))
788 return NULL;
789
790 if (req_op(req) != req_op(next))
791 return NULL;
792
793 if (rq_data_dir(req) != rq_data_dir(next))
794 return NULL;
795
796 if (req->ioprio != next->ioprio)
797 return NULL;
798
799 /*
800 * If we are allowed to merge, then append bio list
801 * from next to rq and release next. merge_requests_fn
802 * will have updated segment counts, update sector
803 * counts here. Handle DISCARDs separately, as they
804 * have separate settings.
805 */
806
807 switch (blk_try_req_merge(req, next)) {
808 case ELEVATOR_DISCARD_MERGE:
809 if (!req_attempt_discard_merge(q, req, next))
810 return NULL;
811 break;
812 case ELEVATOR_BACK_MERGE:
813 if (!ll_merge_requests_fn(q, req, next))
814 return NULL;
815 break;
816 default:
817 return NULL;
818 }
819
820 /*
821 * If failfast settings disagree or any of the two is already
822 * a mixed merge, mark both as mixed before proceeding. This
823 * makes sure that all involved bios have mixable attributes
824 * set properly.
825 */
826 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
827 (req->cmd_flags & REQ_FAILFAST_MASK) !=
828 (next->cmd_flags & REQ_FAILFAST_MASK)) {
829 blk_rq_set_mixed_merge(req);
830 blk_rq_set_mixed_merge(next);
831 }
832
833 /*
834 * At this point we have either done a back merge or front merge. We
835 * need the smaller start_time_ns of the merged requests to be the
836 * current request for accounting purposes.
837 */
838 if (next->start_time_ns < req->start_time_ns)
839 req->start_time_ns = next->start_time_ns;
840
841 req->biotail->bi_next = next->bio;
842 req->biotail = next->biotail;
843
844 req->__data_len += blk_rq_bytes(next);
845
846 if (!blk_discard_mergable(req))
847 elv_merge_requests(q, req, next);
848
849 /*
850 * 'next' is going away, so update stats accordingly
851 */
852 blk_account_io_merge_request(next);
853
854 trace_block_rq_merge(next);
855
856 /*
857 * ownership of bio passed from next to req, return 'next' for
858 * the caller to free
859 */
860 next->bio = NULL;
861 return next;
862}
863
864static struct request *attempt_back_merge(struct request_queue *q,
865 struct request *rq)
866{
867 struct request *next = elv_latter_request(q, rq);
868
869 if (next)
870 return attempt_merge(q, rq, next);
871
872 return NULL;
873}
874
875static struct request *attempt_front_merge(struct request_queue *q,
876 struct request *rq)
877{
878 struct request *prev = elv_former_request(q, rq);
879
880 if (prev)
881 return attempt_merge(q, prev, rq);
882
883 return NULL;
884}
885
886/*
887 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
888 * otherwise. The caller is responsible for freeing 'next' if the merge
889 * happened.
890 */
891bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
892 struct request *next)
893{
894 return attempt_merge(q, rq, next);
895}
896
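/*
 * Check the basic attributes (operation, data direction, cgroup, integrity,
 * crypto context and priority) that decide whether @bio may be merged into
 * @rq at all.
 */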
897bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
898{
899 if (!rq_mergeable(rq) || !bio_mergeable(bio))
900 return false;
901
902 if (req_op(rq) != bio_op(bio))
903 return false;
904
905 /* different data direction or already started, don't merge */
906 if (bio_data_dir(bio) != rq_data_dir(rq))
907 return false;
908
909 /* don't merge across cgroup boundaries */
910 if (!blk_cgroup_mergeable(rq, bio))
911 return false;
912
913 /* only merge integrity protected bio into ditto rq */
914 if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
915 return false;
916
917 /* Only merge if the crypt contexts are compatible */
918 if (!bio_crypt_rq_ctx_compatible(rq, bio))
919 return false;
920
921 if (rq->ioprio != bio_prio(bio))
922 return false;
923
924 return true;
925}
926
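/*
 * Determine what kind of merge, if any, applies: a discard merge for
 * multi-range discards, a back merge if @bio starts right after @rq ends, or
 * a front merge if @bio ends right where @rq starts.
 */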
927enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
928{
929 if (blk_discard_mergable(rq))
930 return ELEVATOR_DISCARD_MERGE;
931 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
932 return ELEVATOR_BACK_MERGE;
933 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
934 return ELEVATOR_FRONT_MERGE;
935 return ELEVATOR_NO_MERGE;
936}
937
938static void blk_account_io_merge_bio(struct request *req)
939{
940 if (!blk_do_io_stat(req))
941 return;
942
943 part_stat_lock();
944 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
945 part_stat_unlock();
946}
947
948enum bio_merge_status {
949 BIO_MERGE_OK,
950 BIO_MERGE_NONE,
951 BIO_MERGE_FAILED,
952};
953
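/*
 * Append @bio to the tail of @req once all back-merge checks have passed,
 * updating the request's size and failfast attributes accordingly.
 */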
954static enum bio_merge_status bio_attempt_back_merge(struct request *req,
955 struct bio *bio, unsigned int nr_segs)
956{
957 const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
958
959 if (!ll_back_merge_fn(req, bio, nr_segs))
960 return BIO_MERGE_FAILED;
961
962 trace_block_bio_backmerge(bio);
963 rq_qos_merge(req->q, req, bio);
964
965 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
966 blk_rq_set_mixed_merge(req);
967
968 req->biotail->bi_next = bio;
969 req->biotail = bio;
970 req->__data_len += bio->bi_iter.bi_size;
971
972 bio_crypt_free_ctx(bio);
973
974 blk_account_io_merge_bio(req);
975 return BIO_MERGE_OK;
976}
977
978static enum bio_merge_status bio_attempt_front_merge(struct request *req,
979 struct bio *bio, unsigned int nr_segs)
980{
981 const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
982
983 if (!ll_front_merge_fn(req, bio, nr_segs))
984 return BIO_MERGE_FAILED;
985
986 trace_block_bio_frontmerge(bio);
987 rq_qos_merge(req->q, req, bio);
988
989 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
990 blk_rq_set_mixed_merge(req);
991
992 bio->bi_next = req->bio;
993 req->bio = bio;
994
995 req->__sector = bio->bi_iter.bi_sector;
996 req->__data_len += bio->bi_iter.bi_size;
997
998 bio_crypt_do_front_merge(req, bio);
999
1000 blk_account_io_merge_bio(req);
1001 return BIO_MERGE_OK;
1002}
1003
1004static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1005 struct request *req, struct bio *bio)
1006{
1007 unsigned short segments = blk_rq_nr_discard_segments(req);
1008
1009 if (segments >= queue_max_discard_segments(q))
1010 goto no_merge;
1011 if (blk_rq_sectors(req) + bio_sectors(bio) >
1012 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1013 goto no_merge;
1014
1015 rq_qos_merge(q, req, bio);
1016
1017 req->biotail->bi_next = bio;
1018 req->biotail = bio;
1019 req->__data_len += bio->bi_iter.bi_size;
1020 req->nr_phys_segments = segments + 1;
1021
1022 blk_account_io_merge_bio(req);
1023 return BIO_MERGE_OK;
1024no_merge:
1025 req_set_nomerge(q, req);
1026 return BIO_MERGE_FAILED;
1027}
1028
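/*
 * Attempt the merge indicated by blk_try_merge(); when @sched_allow_merge is
 * set, the I/O scheduler is asked for permission before back or front merges.
 */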
1029static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1030 struct request *rq,
1031 struct bio *bio,
1032 unsigned int nr_segs,
1033 bool sched_allow_merge)
1034{
1035 if (!blk_rq_merge_ok(rq, bio))
1036 return BIO_MERGE_NONE;
1037
1038 switch (blk_try_merge(rq, bio)) {
1039 case ELEVATOR_BACK_MERGE:
1040 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1041 return bio_attempt_back_merge(rq, bio, nr_segs);
1042 break;
1043 case ELEVATOR_FRONT_MERGE:
1044 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1045 return bio_attempt_front_merge(rq, bio, nr_segs);
1046 break;
1047 case ELEVATOR_DISCARD_MERGE:
1048 return bio_attempt_discard_merge(q, rq, bio);
1049 default:
1050 return BIO_MERGE_NONE;
1051 }
1052
1053 return BIO_MERGE_FAILED;
1054}
1055
1056/**
1057 * blk_attempt_plug_merge - try to merge with %current's plugged list
1058 * @q: request_queue new bio is being queued at
1059 * @bio: new bio being queued
1060 * @nr_segs: number of segments in @bio
1062 *
1063 * Determine whether @bio being queued on @q can be merged with the previous
1064 * request on %current's plugged list. Returns %true if merge was successful,
1065 * otherwise %false.
1066 *
1067 * Plugging coalesces IOs from the same issuer for the same purpose without
1068 * going through @q->queue_lock. As such it's more of an issuing mechanism
1069 * than scheduling, and the request, while it may have elvpriv data, is not
1070 * added to the elevator at this point. In addition, we don't have
1071 * reliable access to the elevator outside queue lock. Only check basic
1072 * merging parameters without querying the elevator.
1073 *
1074 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1075 */
1076bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1077 unsigned int nr_segs)
1078{
1079 struct blk_plug *plug;
1080 struct request *rq;
1081
1082 plug = blk_mq_plug(bio);
1083 if (!plug || rq_list_empty(plug->mq_list))
1084 return false;
1085
1086 rq_list_for_each(&plug->mq_list, rq) {
1087 if (rq->q == q) {
1088 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1089 BIO_MERGE_OK)
1090 return true;
1091 break;
1092 }
1093
1094 /*
1095 * Only keep iterating plug list for merges if we have multiple
1096 * queues
1097 */
1098 if (!plug->multiple_queues)
1099 break;
1100 }
1101 return false;
1102}
1103
1104/*
1105 * Iterate list of requests and see if we can merge this bio with any
1106 * of them.
1107 */
1108bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1109 struct bio *bio, unsigned int nr_segs)
1110{
1111 struct request *rq;
1112 int checked = 8;
1113
1114 list_for_each_entry_reverse(rq, list, queuelist) {
1115 if (!checked--)
1116 break;
1117
1118 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1119 case BIO_MERGE_NONE:
1120 continue;
1121 case BIO_MERGE_OK:
1122 return true;
1123 case BIO_MERGE_FAILED:
1124 return false;
1125 }
1126
1127 }
1128
1129 return false;
1130}
1131EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1132
1133bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1134 unsigned int nr_segs, struct request **merged_request)
1135{
1136 struct request *rq;
1137
1138 switch (elv_merge(q, &rq, bio)) {
1139 case ELEVATOR_BACK_MERGE:
1140 if (!blk_mq_sched_allow_merge(q, rq, bio))
1141 return false;
1142 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1143 return false;
1144 *merged_request = attempt_back_merge(q, rq);
1145 if (!*merged_request)
1146 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1147 return true;
1148 case ELEVATOR_FRONT_MERGE:
1149 if (!blk_mq_sched_allow_merge(q, rq, bio))
1150 return false;
1151 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1152 return false;
1153 *merged_request = attempt_front_merge(q, rq);
1154 if (!*merged_request)
1155 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1156 return true;
1157 case ELEVATOR_DISCARD_MERGE:
1158 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1159 default:
1160 return false;
1161 }
1162}
1163EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Functions related to segment and merge handling
4 */
5#include <linux/kernel.h>
6#include <linux/module.h>
7#include <linux/bio.h>
8#include <linux/blkdev.h>
9#include <linux/scatterlist.h>
10
11#include <trace/events/block.h>
12
13#include "blk.h"
14#include "blk-rq-qos.h"
15
16static inline bool bio_will_gap(struct request_queue *q,
17 struct request *prev_rq, struct bio *prev, struct bio *next)
18{
19 struct bio_vec pb, nb;
20
21 if (!bio_has_data(prev) || !queue_virt_boundary(q))
22 return false;
23
24 /*
25 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
26 * is quite difficult to respect the sg gap limit. We work hard to
27 * merge a huge number of small single bios in case of mkfs.
28 */
29 if (prev_rq)
30 bio_get_first_bvec(prev_rq->bio, &pb);
31 else
32 bio_get_first_bvec(prev, &pb);
33 if (pb.bv_offset & queue_virt_boundary(q))
34 return true;
35
36 /*
37 * We don't need to worry about the situation that the merged segment
38 * ends in unaligned virt boundary:
39 *
40 * - if 'pb' ends aligned, the merged segment ends aligned
41 * - if 'pb' ends unaligned, the next bio must include
42 * one single bvec of 'nb', otherwise the 'nb' can't
43 * merge with 'pb'
44 */
45 bio_get_last_bvec(prev, &pb);
46 bio_get_first_bvec(next, &nb);
47 if (biovec_phys_mergeable(q, &pb, &nb))
48 return false;
49 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
50}
51
52static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
53{
54 return bio_will_gap(req->q, req, req->biotail, bio);
55}
56
57static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
58{
59 return bio_will_gap(req->q, NULL, bio, req->bio);
60}
61
62static struct bio *blk_bio_discard_split(struct request_queue *q,
63 struct bio *bio,
64 struct bio_set *bs,
65 unsigned *nsegs)
66{
67 unsigned int max_discard_sectors, granularity;
68 int alignment;
69 sector_t tmp;
70 unsigned split_sectors;
71
72 *nsegs = 1;
73
74 /* Zero-sector (unknown) and one-sector granularities are the same. */
75 granularity = max(q->limits.discard_granularity >> 9, 1U);
76
77 max_discard_sectors = min(q->limits.max_discard_sectors,
78 bio_allowed_max_sectors(q));
79 max_discard_sectors -= max_discard_sectors % granularity;
80
81 if (unlikely(!max_discard_sectors)) {
82 /* XXX: warn */
83 return NULL;
84 }
85
86 if (bio_sectors(bio) <= max_discard_sectors)
87 return NULL;
88
89 split_sectors = max_discard_sectors;
90
91 /*
92 * If the next starting sector would be misaligned, stop the discard at
93 * the previous aligned sector.
94 */
95 alignment = (q->limits.discard_alignment >> 9) % granularity;
96
97 tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
98 tmp = sector_div(tmp, granularity);
99
100 if (split_sectors > tmp)
101 split_sectors -= tmp;
102
103 return bio_split(bio, split_sectors, GFP_NOIO, bs);
104}
105
106static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
107 struct bio *bio, struct bio_set *bs, unsigned *nsegs)
108{
109 *nsegs = 0;
110
111 if (!q->limits.max_write_zeroes_sectors)
112 return NULL;
113
114 if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
115 return NULL;
116
117 return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
118}
119
120static struct bio *blk_bio_write_same_split(struct request_queue *q,
121 struct bio *bio,
122 struct bio_set *bs,
123 unsigned *nsegs)
124{
125 *nsegs = 1;
126
127 if (!q->limits.max_write_same_sectors)
128 return NULL;
129
130 if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
131 return NULL;
132
133 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
134}
135
136/*
137 * Return the maximum number of sectors from the start of a bio that may be
138 * submitted as a single request to a block device. If enough sectors remain,
139 * align the end to the physical block size. Otherwise align the end to the
140 * logical block size. This approach minimizes the number of non-aligned
141 * requests that are submitted to a block device if the start of a bio is not
142 * aligned to a physical block boundary.
143 */
144static inline unsigned get_max_io_size(struct request_queue *q,
145 struct bio *bio)
146{
147 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
148 unsigned max_sectors = sectors;
149 unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
150 unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
151 unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
152
153 max_sectors += start_offset;
154 max_sectors &= ~(pbs - 1);
155 if (max_sectors > start_offset)
156 return max_sectors - start_offset;
157
158 return sectors & ~(lbs - 1);
159}
160
161static inline unsigned get_max_segment_size(const struct request_queue *q,
162 struct page *start_page,
163 unsigned long offset)
164{
165 unsigned long mask = queue_segment_boundary(q);
166
167 offset = mask & (page_to_phys(start_page) + offset);
168
169 /*
170 * overflow may be triggered in case of zero page physical address
171 * on 32bit arch, use queue's max segment size when that happens.
172 */
173 return min_not_zero(mask - offset + 1,
174 (unsigned long)queue_max_segment_size(q));
175}
176
177/**
178 * bvec_split_segs - verify whether or not a bvec should be split in the middle
179 * @q: [in] request queue associated with the bio associated with @bv
180 * @bv: [in] bvec to examine
181 * @nsegs: [in,out] Number of segments in the bio being built. Incremented
182 * by the number of segments from @bv that may be appended to that
183 * bio without exceeding @max_segs
184 * @sectors: [in,out] Number of sectors in the bio being built. Incremented
185 * by the number of sectors from @bv that may be appended to that
186 * bio without exceeding @max_sectors
187 * @max_segs: [in] upper bound for *@nsegs
188 * @max_sectors: [in] upper bound for *@sectors
189 *
190 * When splitting a bio, it can happen that a bvec is encountered that is too
191 * big to fit in a single segment and hence that it has to be split in the
192 * middle. This function verifies whether or not that should happen. The value
193 * %true is returned if and only if appending the entire @bv to a bio with
194 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
195 * the block driver.
196 */
197static bool bvec_split_segs(const struct request_queue *q,
198 const struct bio_vec *bv, unsigned *nsegs,
199 unsigned *sectors, unsigned max_segs,
200 unsigned max_sectors)
201{
202 unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
203 unsigned len = min(bv->bv_len, max_len);
204 unsigned total_len = 0;
205 unsigned seg_size = 0;
206
207 while (len && *nsegs < max_segs) {
208 seg_size = get_max_segment_size(q, bv->bv_page,
209 bv->bv_offset + total_len);
210 seg_size = min(seg_size, len);
211
212 (*nsegs)++;
213 total_len += seg_size;
214 len -= seg_size;
215
216 if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
217 break;
218 }
219
220 *sectors += total_len >> 9;
221
222 /* tell the caller to split the bvec if it is too big to fit */
223 return len > 0 || bv->bv_len > max_len;
224}
225
226/**
227 * blk_bio_segment_split - split a bio in two bios
228 * @q: [in] request queue pointer
229 * @bio: [in] bio to be split
230 * @bs: [in] bio set to allocate the clone from
231 * @segs: [out] number of segments in the bio with the first half of the sectors
232 *
233 * Clone @bio, update the bi_iter of the clone to represent the first sectors
234 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
235 * following is guaranteed for the cloned bio:
236 * - That it has at most get_max_io_size(@q, @bio) sectors.
237 * - That it has at most queue_max_segments(@q) segments.
238 *
239 * Except for discard requests the cloned bio will point at the bi_io_vec of
240 * the original bio. It is the responsibility of the caller to ensure that the
241 * original bio is not freed before the cloned bio. The caller is also
242 * responsible for ensuring that @bs is only destroyed after processing of the
243 * split bio has finished.
244 */
245static struct bio *blk_bio_segment_split(struct request_queue *q,
246 struct bio *bio,
247 struct bio_set *bs,
248 unsigned *segs)
249{
250 struct bio_vec bv, bvprv, *bvprvp = NULL;
251 struct bvec_iter iter;
252 unsigned nsegs = 0, sectors = 0;
253 const unsigned max_sectors = get_max_io_size(q, bio);
254 const unsigned max_segs = queue_max_segments(q);
255
256 bio_for_each_bvec(bv, bio, iter) {
257 /*
258 * If the queue doesn't support SG gaps and adding this
259 * offset would create a gap, disallow it.
260 */
261 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
262 goto split;
263
264 if (nsegs < max_segs &&
265 sectors + (bv.bv_len >> 9) <= max_sectors &&
266 bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
267 nsegs++;
268 sectors += bv.bv_len >> 9;
269 } else if (bvec_split_segs(q, &bv, &nsegs, §ors, max_segs,
270 max_sectors)) {
271 goto split;
272 }
273
274 bvprv = bv;
275 bvprvp = &bvprv;
276 }
277
278 *segs = nsegs;
279 return NULL;
280split:
281 *segs = nsegs;
282
283 /*
284 * Bio splitting may cause subtle trouble such as hang when doing sync
285 * iopoll in direct IO routine. Given performance gain of iopoll for
286 * big IO can be trival, disable iopoll when split needed.
287 */
288 bio->bi_opf &= ~REQ_HIPRI;
289
290 return bio_split(bio, sectors, GFP_NOIO, bs);
291}
292
293/**
294 * __blk_queue_split - split a bio and submit the second half
295 * @bio: [in, out] bio to be split
296 * @nr_segs: [out] number of segments in the first bio
297 *
298 * Split a bio into two bios, chain the two bios, submit the second half and
299 * store a pointer to the first half in *@bio. If the second bio is still too
300 * big it will be split by a recursive call to this function. Since this
301 * function may allocate a new bio from q->bio_split, it is the responsibility
302 * of the caller to ensure that q->bio_split is only released after processing
303 * of the split bio has finished.
304 */
305void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
306{
307 struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
308 struct bio *split = NULL;
309
310 switch (bio_op(*bio)) {
311 case REQ_OP_DISCARD:
312 case REQ_OP_SECURE_ERASE:
313 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
314 break;
315 case REQ_OP_WRITE_ZEROES:
316 split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
317 nr_segs);
318 break;
319 case REQ_OP_WRITE_SAME:
320 split = blk_bio_write_same_split(q, *bio, &q->bio_split,
321 nr_segs);
322 break;
323 default:
324 /*
325 * All drivers must accept single-segments bios that are <=
326 * PAGE_SIZE. This is a quick and dirty check that relies on
327 * the fact that bi_io_vec[0] is always valid if a bio has data.
328 * The check might lead to occasional false negatives when bios
329 * are cloned, but compared to the performance impact of cloned
330 * bios themselves the loop below doesn't matter anyway.
331 */
332 if (!q->limits.chunk_sectors &&
333 (*bio)->bi_vcnt == 1 &&
334 ((*bio)->bi_io_vec[0].bv_len +
335 (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
336 *nr_segs = 1;
337 break;
338 }
339 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
340 break;
341 }
342
343 if (split) {
344 /* there isn't chance to merge the splitted bio */
345 split->bi_opf |= REQ_NOMERGE;
346
347 bio_chain(split, *bio);
348 trace_block_split(split, (*bio)->bi_iter.bi_sector);
349 submit_bio_noacct(*bio);
350 *bio = split;
351
352 blk_throtl_charge_bio_split(*bio);
353 }
354}
355
356/**
357 * blk_queue_split - split a bio and submit the second half
358 * @bio: [in, out] bio to be split
359 *
360 * Split a bio into two bios, chains the two bios, submit the second half and
361 * store a pointer to the first half in *@bio. Since this function may allocate
362 * a new bio from q->bio_split, it is the responsibility of the caller to ensure
363 * that q->bio_split is only released after processing of the split bio has
364 * finished.
365 */
366void blk_queue_split(struct bio **bio)
367{
368 unsigned int nr_segs;
369
370 __blk_queue_split(bio, &nr_segs);
371}
372EXPORT_SYMBOL(blk_queue_split);
373
374unsigned int blk_recalc_rq_segments(struct request *rq)
375{
376 unsigned int nr_phys_segs = 0;
377 unsigned int nr_sectors = 0;
378 struct req_iterator iter;
379 struct bio_vec bv;
380
381 if (!rq->bio)
382 return 0;
383
384 switch (bio_op(rq->bio)) {
385 case REQ_OP_DISCARD:
386 case REQ_OP_SECURE_ERASE:
387 if (queue_max_discard_segments(rq->q) > 1) {
388 struct bio *bio = rq->bio;
389
390 for_each_bio(bio)
391 nr_phys_segs++;
392 return nr_phys_segs;
393 }
394 return 1;
395 case REQ_OP_WRITE_ZEROES:
396 return 0;
397 case REQ_OP_WRITE_SAME:
398 return 1;
399 }
400
401 rq_for_each_bvec(bv, rq, iter)
402 bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
403 UINT_MAX, UINT_MAX);
404 return nr_phys_segs;
405}
406
407static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
408 struct scatterlist *sglist)
409{
410 if (!*sg)
411 return sglist;
412
413 /*
414 * If the driver previously mapped a shorter list, we could see a
415 * termination bit prematurely unless it fully inits the sg table
416 * on each mapping. We KNOW that there must be more entries here
417 * or the driver would be buggy, so force clear the termination bit
418 * to avoid doing a full sg_init_table() in drivers for each command.
419 */
420 sg_unmark_end(*sg);
421 return sg_next(*sg);
422}
423
424static unsigned blk_bvec_map_sg(struct request_queue *q,
425 struct bio_vec *bvec, struct scatterlist *sglist,
426 struct scatterlist **sg)
427{
428 unsigned nbytes = bvec->bv_len;
429 unsigned nsegs = 0, total = 0;
430
431 while (nbytes > 0) {
432 unsigned offset = bvec->bv_offset + total;
433 unsigned len = min(get_max_segment_size(q, bvec->bv_page,
434 offset), nbytes);
435 struct page *page = bvec->bv_page;
436
437 /*
438 * Unfortunately a fair number of drivers barf on scatterlists
439 * that have an offset larger than PAGE_SIZE, despite other
440 * subsystems dealing with that invariant just fine. For now
441 * stick to the legacy format where we never present those from
442 * the block layer, but the code below should be removed once
443 * these offenders (mostly MMC/SD drivers) are fixed.
444 */
445 page += (offset >> PAGE_SHIFT);
446 offset &= ~PAGE_MASK;
447
448 *sg = blk_next_sg(sg, sglist);
449 sg_set_page(*sg, page, len, offset);
450
451 total += len;
452 nbytes -= len;
453 nsegs++;
454 }
455
456 return nsegs;
457}
458
459static inline int __blk_bvec_map_sg(struct bio_vec bv,
460 struct scatterlist *sglist, struct scatterlist **sg)
461{
462 *sg = blk_next_sg(sg, sglist);
463 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
464 return 1;
465}
466
467/* only try to merge bvecs into one sg if they are from two bios */
468static inline bool
469__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
470 struct bio_vec *bvprv, struct scatterlist **sg)
471{
472
473 int nbytes = bvec->bv_len;
474
475 if (!*sg)
476 return false;
477
478 if ((*sg)->length + nbytes > queue_max_segment_size(q))
479 return false;
480
481 if (!biovec_phys_mergeable(q, bvprv, bvec))
482 return false;
483
484 (*sg)->length += nbytes;
485
486 return true;
487}
488
489static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
490 struct scatterlist *sglist,
491 struct scatterlist **sg)
492{
493 struct bio_vec bvec, bvprv = { NULL };
494 struct bvec_iter iter;
495 int nsegs = 0;
496 bool new_bio = false;
497
498 for_each_bio(bio) {
499 bio_for_each_bvec(bvec, bio, iter) {
500 /*
501 * Only try to merge bvecs from two bios given we
502 * have done bio internal merge when adding pages
503 * to bio
504 */
505 if (new_bio &&
506 __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
507 goto next_bvec;
508
509 if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
510 nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
511 else
512 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
513 next_bvec:
514 new_bio = false;
515 }
516 if (likely(bio->bi_iter.bi_size)) {
517 bvprv = bvec;
518 new_bio = true;
519 }
520 }
521
522 return nsegs;
523}
524
525/*
526 * map a request to scatterlist, return number of sg entries setup. Caller
527 * must make sure sg can hold rq->nr_phys_segments entries
528 */
529int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
530 struct scatterlist *sglist, struct scatterlist **last_sg)
531{
532 int nsegs = 0;
533
534 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
535 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
536 else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
537 nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
538 else if (rq->bio)
539 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
540
541 if (*last_sg)
542 sg_mark_end(*last_sg);
543
544 /*
545 * Something must have been wrong if the figured number of
546 * segment is bigger than number of req's physical segments
547 */
548 WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
549
550 return nsegs;
551}
552EXPORT_SYMBOL(__blk_rq_map_sg);
553
554static inline unsigned int blk_rq_get_max_segments(struct request *rq)
555{
556 if (req_op(rq) == REQ_OP_DISCARD)
557 return queue_max_discard_segments(rq->q);
558 return queue_max_segments(rq->q);
559}
560
561static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
562 unsigned int nr_phys_segs)
563{
564 if (blk_integrity_merge_bio(req->q, req, bio) == false)
565 goto no_merge;
566
567 /* discard request merge won't add new segment */
568 if (req_op(req) == REQ_OP_DISCARD)
569 return 1;
570
571 if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
572 goto no_merge;
573
574 /*
575 * This will form the start of a new hw segment. Bump both
576 * counters.
577 */
578 req->nr_phys_segments += nr_phys_segs;
579 return 1;
580
581no_merge:
582 req_set_nomerge(req->q, req);
583 return 0;
584}
585
586int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
587{
588 if (req_gap_back_merge(req, bio))
589 return 0;
590 if (blk_integrity_rq(req) &&
591 integrity_req_gap_back_merge(req, bio))
592 return 0;
593 if (!bio_crypt_ctx_back_mergeable(req, bio))
594 return 0;
595 if (blk_rq_sectors(req) + bio_sectors(bio) >
596 blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
597 req_set_nomerge(req->q, req);
598 return 0;
599 }
600
601 return ll_new_hw_segment(req, bio, nr_segs);
602}
603
604static int ll_front_merge_fn(struct request *req, struct bio *bio,
605 unsigned int nr_segs)
606{
607 if (req_gap_front_merge(req, bio))
608 return 0;
609 if (blk_integrity_rq(req) &&
610 integrity_req_gap_front_merge(req, bio))
611 return 0;
612 if (!bio_crypt_ctx_front_mergeable(req, bio))
613 return 0;
614 if (blk_rq_sectors(req) + bio_sectors(bio) >
615 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
616 req_set_nomerge(req->q, req);
617 return 0;
618 }
619
620 return ll_new_hw_segment(req, bio, nr_segs);
621}
622
623static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
624 struct request *next)
625{
626 unsigned short segments = blk_rq_nr_discard_segments(req);
627
628 if (segments >= queue_max_discard_segments(q))
629 goto no_merge;
630 if (blk_rq_sectors(req) + bio_sectors(next->bio) >
631 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
632 goto no_merge;
633
634 req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
635 return true;
636no_merge:
637 req_set_nomerge(q, req);
638 return false;
639}
640
641static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
642 struct request *next)
643{
644 int total_phys_segments;
645
646 if (req_gap_back_merge(req, next->bio))
647 return 0;
648
649 /*
650 * Will it become too large?
651 */
652 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
653 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
654 return 0;
655
656 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
657 if (total_phys_segments > blk_rq_get_max_segments(req))
658 return 0;
659
660 if (blk_integrity_merge_rq(q, req, next) == false)
661 return 0;
662
663 if (!bio_crypt_ctx_merge_rq(req, next))
664 return 0;
665
666 /* Merge is OK... */
667 req->nr_phys_segments = total_phys_segments;
668 return 1;
669}
670
671/**
672 * blk_rq_set_mixed_merge - mark a request as mixed merge
673 * @rq: request to mark as mixed merge
674 *
675 * Description:
676 * @rq is about to be mixed merged. Make sure the attributes
677 * which can be mixed are set in each bio and mark @rq as mixed
678 * merged.
679 */
680void blk_rq_set_mixed_merge(struct request *rq)
681{
682 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
683 struct bio *bio;
684
685 if (rq->rq_flags & RQF_MIXED_MERGE)
686 return;
687
688 /*
689 * @rq will no longer represent mixable attributes for all the
690 * contained bios. It will just track those of the first one.
691 * Distributes the attributs to each bio.
692 */
693 for (bio = rq->bio; bio; bio = bio->bi_next) {
694 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
695 (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
696 bio->bi_opf |= ff;
697 }
698 rq->rq_flags |= RQF_MIXED_MERGE;
699}
700
701static void blk_account_io_merge_request(struct request *req)
702{
703 if (blk_do_io_stat(req)) {
704 part_stat_lock();
705 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
706 part_stat_unlock();
707 }
708}
709
710static enum elv_merge blk_try_req_merge(struct request *req,
711 struct request *next)
712{
713 if (blk_discard_mergable(req))
714 return ELEVATOR_DISCARD_MERGE;
715 else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
716 return ELEVATOR_BACK_MERGE;
717
718 return ELEVATOR_NO_MERGE;
719}
720
721/*
722 * For non-mq, this has to be called with the request spinlock acquired.
723 * For mq with scheduling, the appropriate queue wide lock should be held.
724 */
725static struct request *attempt_merge(struct request_queue *q,
726 struct request *req, struct request *next)
727{
728 if (!rq_mergeable(req) || !rq_mergeable(next))
729 return NULL;
730
731 if (req_op(req) != req_op(next))
732 return NULL;
733
734 if (rq_data_dir(req) != rq_data_dir(next)
735 || req->rq_disk != next->rq_disk)
736 return NULL;
737
738 if (req_op(req) == REQ_OP_WRITE_SAME &&
739 !blk_write_same_mergeable(req->bio, next->bio))
740 return NULL;
741
742 /*
743 * Don't allow merge of different write hints, or for a hint with
744 * non-hint IO.
745 */
746 if (req->write_hint != next->write_hint)
747 return NULL;
748
749 if (req->ioprio != next->ioprio)
750 return NULL;
751
752 /*
753 * If we are allowed to merge, then append the bio list from 'next'
754 * onto 'rq' and release 'next'. merge_requests_fn will already have
755 * updated the segment counts; update the sector counts here.
756 * Handle DISCARDs separately, as they have separate
757 * settings.
758 */
759
760 switch (blk_try_req_merge(req, next)) {
761 case ELEVATOR_DISCARD_MERGE:
762 if (!req_attempt_discard_merge(q, req, next))
763 return NULL;
764 break;
765 case ELEVATOR_BACK_MERGE:
766 if (!ll_merge_requests_fn(q, req, next))
767 return NULL;
768 break;
769 default:
770 return NULL;
771 }
772
773 /*
774 * If failfast settings disagree or any of the two is already
775 * a mixed merge, mark both as mixed before proceeding. This
776 * makes sure that all involved bios have mixable attributes
777 * set properly.
778 */
779 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
780 (req->cmd_flags & REQ_FAILFAST_MASK) !=
781 (next->cmd_flags & REQ_FAILFAST_MASK)) {
782 blk_rq_set_mixed_merge(req);
783 blk_rq_set_mixed_merge(next);
784 }
785
786 /*
787 * At this point we have either done a back merge or front merge. We
788 * need the smaller start_time_ns of the merged requests to be the
789 * current request for accounting purposes.
790 */
791 if (next->start_time_ns < req->start_time_ns)
792 req->start_time_ns = next->start_time_ns;
793
794 req->biotail->bi_next = next->bio;
795 req->biotail = next->biotail;
796
797 req->__data_len += blk_rq_bytes(next);
798
799 if (!blk_discard_mergable(req))
800 elv_merge_requests(q, req, next);
801
802 /*
803 * 'next' is going away, so update stats accordingly
804 */
805 blk_account_io_merge_request(next);
806
807 trace_block_rq_merge(next);
808
809 /*
810 * Ownership of the bios has passed from 'next' to 'req'; return
811 * 'next' for the caller to free.
812 */
813 next->bio = NULL;
814 return next;
815}
816
817static struct request *attempt_back_merge(struct request_queue *q,
818 struct request *rq)
819{
820 struct request *next = elv_latter_request(q, rq);
821
822 if (next)
823 return attempt_merge(q, rq, next);
824
825 return NULL;
826}
827
828static struct request *attempt_front_merge(struct request_queue *q,
829 struct request *rq)
830{
831 struct request *prev = elv_former_request(q, rq);
832
833 if (prev)
834 return attempt_merge(q, prev, rq);
835
836 return NULL;
837}
838
839/*
840 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
841 * otherwise. The caller is responsible for freeing 'next' if the merge
842 * happened.
843 */
844bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
845 struct request *next)
846{
847 return attempt_merge(q, rq, next);
848}
849
850bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
851{
852 if (!rq_mergeable(rq) || !bio_mergeable(bio))
853 return false;
854
855 if (req_op(rq) != bio_op(bio))
856 return false;
857
858 /* different data direction or already started, don't merge */
859 if (bio_data_dir(bio) != rq_data_dir(rq))
860 return false;
861
862 /* must be same device */
863 if (rq->rq_disk != bio->bi_bdev->bd_disk)
864 return false;
865
866 /* only merge an integrity-protected bio into a likewise protected rq */
867 if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
868 return false;
869
870 /* Only merge if the crypt contexts are compatible */
871 if (!bio_crypt_rq_ctx_compatible(rq, bio))
872 return false;
873
874 /* must be using the same buffer */
875 if (req_op(rq) == REQ_OP_WRITE_SAME &&
876 !blk_write_same_mergeable(rq->bio, bio))
877 return false;
878
879 /*
880 * Don't allow merge of different write hints, or for a hint with
881 * non-hint IO.
882 */
883 if (rq->write_hint != bio->bi_write_hint)
884 return false;
885
886 if (rq->ioprio != bio_prio(bio))
887 return false;
888
889 return true;
890}
891
892enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
893{
894 if (blk_discard_mergable(rq))
895 return ELEVATOR_DISCARD_MERGE;
896 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
897 return ELEVATOR_BACK_MERGE;
898 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
899 return ELEVATOR_FRONT_MERGE;
900 return ELEVATOR_NO_MERGE;
901}
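
/*
 * Worked example (editorial): for 'rq' covering sectors [128, 136),
 * blk_rq_pos(rq) == 128 and blk_rq_sectors(rq) == 8. A bio at sector 136
 * matches 128 + 8 == 136 -> ELEVATOR_BACK_MERGE; an 8-sector bio at sector
 * 120 matches 128 - 8 == 120 -> ELEVATOR_FRONT_MERGE; anything else yields
 * ELEVATOR_NO_MERGE.
 */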
902
903static void blk_account_io_merge_bio(struct request *req)
904{
905 if (!blk_do_io_stat(req))
906 return;
907
908 part_stat_lock();
909 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
910 part_stat_unlock();
911}
912
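/*
 * Tri-state result used by the bio merge helpers below: BIO_MERGE_OK means
 * the bio was merged into the request; BIO_MERGE_NONE means the request was
 * not a merge candidate at all, so callers keep scanning; BIO_MERGE_FAILED
 * means it looked like a candidate but the merge could not be done, at which
 * point list scanners such as blk_bio_list_merge() stop.
 */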
913enum bio_merge_status {
914 BIO_MERGE_OK,
915 BIO_MERGE_NONE,
916 BIO_MERGE_FAILED,
917};
918
919static enum bio_merge_status bio_attempt_back_merge(struct request *req,
920 struct bio *bio, unsigned int nr_segs)
921{
922 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
923
924 if (!ll_back_merge_fn(req, bio, nr_segs))
925 return BIO_MERGE_FAILED;
926
927 trace_block_bio_backmerge(bio);
928 rq_qos_merge(req->q, req, bio);
929
930 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
931 blk_rq_set_mixed_merge(req);
932
933 req->biotail->bi_next = bio;
934 req->biotail = bio;
935 req->__data_len += bio->bi_iter.bi_size;
936
937 bio_crypt_free_ctx(bio);
938
939 blk_account_io_merge_bio(req);
940 return BIO_MERGE_OK;
941}
942
943static enum bio_merge_status bio_attempt_front_merge(struct request *req,
944 struct bio *bio, unsigned int nr_segs)
945{
946 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
947
948 if (!ll_front_merge_fn(req, bio, nr_segs))
949 return BIO_MERGE_FAILED;
950
951 trace_block_bio_frontmerge(bio);
952 rq_qos_merge(req->q, req, bio);
953
954 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
955 blk_rq_set_mixed_merge(req);
956
957 bio->bi_next = req->bio;
958 req->bio = bio;
959
960 req->__sector = bio->bi_iter.bi_sector;
961 req->__data_len += bio->bi_iter.bi_size;
962
963 bio_crypt_do_front_merge(req, bio);
964
965 blk_account_io_merge_bio(req);
966 return BIO_MERGE_OK;
967}
968
969static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
970 struct request *req, struct bio *bio)
971{
972 unsigned short segments = blk_rq_nr_discard_segments(req);
973
974 if (segments >= queue_max_discard_segments(q))
975 goto no_merge;
976 if (blk_rq_sectors(req) + bio_sectors(bio) >
977 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
978 goto no_merge;
979
980 rq_qos_merge(q, req, bio);
981
982 req->biotail->bi_next = bio;
983 req->biotail = bio;
984 req->__data_len += bio->bi_iter.bi_size;
985 req->nr_phys_segments = segments + 1;
986
987 blk_account_io_merge_bio(req);
988 return BIO_MERGE_OK;
989no_merge:
990 req_set_nomerge(q, req);
991 return BIO_MERGE_FAILED;
992}
993
994static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
995 struct request *rq,
996 struct bio *bio,
997 unsigned int nr_segs,
998 bool sched_allow_merge)
999{
1000 if (!blk_rq_merge_ok(rq, bio))
1001 return BIO_MERGE_NONE;
1002
1003 switch (blk_try_merge(rq, bio)) {
1004 case ELEVATOR_BACK_MERGE:
1005 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1006 return bio_attempt_back_merge(rq, bio, nr_segs);
1007 break;
1008 case ELEVATOR_FRONT_MERGE:
1009 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1010 return bio_attempt_front_merge(rq, bio, nr_segs);
1011 break;
1012 case ELEVATOR_DISCARD_MERGE:
1013 return bio_attempt_discard_merge(q, rq, bio);
1014 default:
1015 return BIO_MERGE_NONE;
1016 }
1017
1018 return BIO_MERGE_FAILED;
1019}
1020
1021/**
1022 * blk_attempt_plug_merge - try to merge with %current's plugged list
1023 * @q: request_queue new bio is being queued at
1024 * @bio: new bio being queued
1025 * @nr_segs: number of segments in @bio
1026 * @same_queue_rq: pointer to &struct request that gets filled in when
1027 * another request associated with @q is found on the plug list
1028 * (optional, may be %NULL)
1029 *
1030 * Determine whether @bio being queued on @q can be merged with a request
1031 * on %current's plugged list. Returns %true if merge was successful,
1032 * otherwise %false.
1033 *
1034 * Plugging coalesces IOs from the same issuer for the same purpose without
1035 * going through @q->queue_lock. As such it's more of an issuing mechanism
1036 * than scheduling, and the request, while it may have elvpriv data, is not
1037 * added to the elevator at this point. In addition, we don't have
1038 * reliable access to the elevator outside queue lock. Only check basic
1039 * merging parameters without querying the elevator.
1040 *
1041 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1042 */
1043bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1044 unsigned int nr_segs, struct request **same_queue_rq)
1045{
1046 struct blk_plug *plug;
1047 struct request *rq;
1048 struct list_head *plug_list;
1049
1050 plug = blk_mq_plug(q, bio);
1051 if (!plug)
1052 return false;
1053
1054 plug_list = &plug->mq_list;
1055
1056 list_for_each_entry_reverse(rq, plug_list, queuelist) {
1057 if (rq->q == q && same_queue_rq) {
1058 /*
1059 * Only the blk-mq multiple hardware queues case checks for a
1060 * request on the same queue; there should be only one such
1061 * request per queue on the plug list.
1062 */
1063 *same_queue_rq = rq;
1064 }
1065
1066 if (rq->q != q)
1067 continue;
1068
1069 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1070 BIO_MERGE_OK)
1071 return true;
1072 }
1073
1074 return false;
1075}
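
/*
 * Illustrative caller sketch (editorial; the exact call site in the submit
 * path may differ): before allocating a request for a new bio, blk-mq can
 * try the plug list first and skip request allocation entirely on success:
 *
 *	if (!blk_queue_nomerges(q) &&
 *	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
 *		return;
 *
 * 'same_queue_rq' is optional and only interesting to callers that want to
 * issue directly against a request already plugged for the same queue.
 */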
1076
1077/*
1078 * Iterate list of requests and see if we can merge this bio with any
1079 * of them.
1080 */
1081bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1082 struct bio *bio, unsigned int nr_segs)
1083{
1084 struct request *rq;
1085 int checked = 8;
1086
1087 list_for_each_entry_reverse(rq, list, queuelist) {
1088 if (!checked--)
1089 break;
1090
1091 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1092 case BIO_MERGE_NONE:
1093 continue;
1094 case BIO_MERGE_OK:
1095 return true;
1096 case BIO_MERGE_FAILED:
1097 return false;
1098 }
1099
1100 }
1101
1102 return false;
1103}
1104EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1105
1106bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1107 unsigned int nr_segs, struct request **merged_request)
1108{
1109 struct request *rq;
1110
1111 switch (elv_merge(q, &rq, bio)) {
1112 case ELEVATOR_BACK_MERGE:
1113 if (!blk_mq_sched_allow_merge(q, rq, bio))
1114 return false;
1115 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1116 return false;
1117 *merged_request = attempt_back_merge(q, rq);
1118 if (!*merged_request)
1119 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1120 return true;
1121 case ELEVATOR_FRONT_MERGE:
1122 if (!blk_mq_sched_allow_merge(q, rq, bio))
1123 return false;
1124 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1125 return false;
1126 *merged_request = attempt_front_merge(q, rq);
1127 if (!*merged_request)
1128 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1129 return true;
1130 case ELEVATOR_DISCARD_MERGE:
1131 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1132 default:
1133 return false;
1134 }
1135}
1136EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
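
/*
 * Illustrative use (editorial; see the mq-deadline and BFQ ->bio_merge()
 * implementations for the real pattern): a scheduler calls this under its
 * own lock and frees any request that was emptied by a follow-up
 * request-to-request merge:
 *
 *	struct request *free = NULL;
 *	bool ret;
 *
 *	spin_lock(&sched_lock);
 *	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
 *	spin_unlock(&sched_lock);
 *	if (free)
 *		blk_mq_free_request(free);
 *	return ret;
 */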