v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions related to segment and merge handling
   4 */
   5#include <linux/kernel.h>
   6#include <linux/module.h>
   7#include <linux/bio.h>
   8#include <linux/blkdev.h>
   9#include <linux/blk-integrity.h>
  10#include <linux/scatterlist.h>
  11#include <linux/part_stat.h>
  12#include <linux/blk-cgroup.h>
  13
  14#include <trace/events/block.h>
  15
  16#include "blk.h"
  17#include "blk-mq-sched.h"
  18#include "blk-rq-qos.h"
  19#include "blk-throttle.h"
  20
  21static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
  22{
  23	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
  24}
  25
  26static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
  27{
  28	struct bvec_iter iter = bio->bi_iter;
  29	int idx;
  30
  31	bio_get_first_bvec(bio, bv);
  32	if (bv->bv_len == bio->bi_iter.bi_size)
  33		return;		/* this bio only has a single bvec */
  34
  35	bio_advance_iter(bio, &iter, iter.bi_size);
  36
  37	if (!iter.bi_bvec_done)
  38		idx = iter.bi_idx - 1;
  39	else	/* in the middle of bvec */
  40		idx = iter.bi_idx;
  41
  42	*bv = bio->bi_io_vec[idx];
  43
  44	/*
  45	 * iter.bi_bvec_done records actual length of the last bvec
  46	 * if this bio ends in the middle of one io vector
  47	 */
  48	if (iter.bi_bvec_done)
  49		bv->bv_len = iter.bi_bvec_done;
  50}
  51
  52static inline bool bio_will_gap(struct request_queue *q,
  53		struct request *prev_rq, struct bio *prev, struct bio *next)
  54{
  55	struct bio_vec pb, nb;
  56
  57	if (!bio_has_data(prev) || !queue_virt_boundary(q))
  58		return false;
  59
  60	/*
  61	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
  62	 * is quite difficult to respect the sg gap limit.  We work hard to
  63	 * merge a huge number of small single bios in case of mkfs.
  64	 */
  65	if (prev_rq)
  66		bio_get_first_bvec(prev_rq->bio, &pb);
  67	else
  68		bio_get_first_bvec(prev, &pb);
  69	if (pb.bv_offset & queue_virt_boundary(q))
  70		return true;
  71
  72	/*
  73	 * We don't need to worry about the situation that the merged segment
  74	 * ends in unaligned virt boundary:
  75	 *
  76	 * - if 'pb' ends aligned, the merged segment ends aligned
  77	 * - if 'pb' ends unaligned, the next bio must include
  78	 *   one single bvec of 'nb', otherwise the 'nb' can't
  79	 *   merge with 'pb'
  80	 */
  81	bio_get_last_bvec(prev, &pb);
  82	bio_get_first_bvec(next, &nb);
  83	if (biovec_phys_mergeable(q, &pb, &nb))
  84		return false;
  85	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
  86}
  87
  88static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
  89{
  90	return bio_will_gap(req->q, req, req->biotail, bio);
  91}
  92
  93static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  94{
  95	return bio_will_gap(req->q, NULL, bio, req->bio);
  96}
  97
  98/*
   99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
  100 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
  101 * logical block size, which is the minimum unit accepted by the hardware.
 102 */
 103static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
 104{
 105	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
 106}
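/*
 * Worked example (assumed values): with a 512-byte logical block size,
 * round_down(UINT_MAX, 512) == 0xfffffe00 bytes, so the helper above
 * returns 0xfffffe00 >> SECTOR_SHIFT == 8388607 sectors, i.e. just
 * under 4 GiB per bio.
 */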
 107
 108static struct bio *bio_split_discard(struct bio *bio,
 109				     const struct queue_limits *lim,
 110				     unsigned *nsegs, struct bio_set *bs)
 111{
 112	unsigned int max_discard_sectors, granularity;
 113	sector_t tmp;
 114	unsigned split_sectors;
 115
 116	*nsegs = 1;
 117
 118	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 119	granularity = max(lim->discard_granularity >> 9, 1U);
 120
 121	max_discard_sectors =
 122		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
 123	max_discard_sectors -= max_discard_sectors % granularity;
 124
 125	if (unlikely(!max_discard_sectors)) {
 126		/* XXX: warn */
 127		return NULL;
 128	}
 129
 130	if (bio_sectors(bio) <= max_discard_sectors)
 131		return NULL;
 132
 133	split_sectors = max_discard_sectors;
 134
 135	/*
 136	 * If the next starting sector would be misaligned, stop the discard at
 137	 * the previous aligned sector.
 138	 */
 139	tmp = bio->bi_iter.bi_sector + split_sectors -
 140		((lim->discard_alignment >> 9) % granularity);
 141	tmp = sector_div(tmp, granularity);
 142
 143	if (split_sectors > tmp)
 144		split_sectors -= tmp;
 145
 146	return bio_split(bio, split_sectors, GFP_NOIO, bs);
 147}
 148
 149static struct bio *bio_split_write_zeroes(struct bio *bio,
 150					  const struct queue_limits *lim,
 151					  unsigned *nsegs, struct bio_set *bs)
 152{
 153	*nsegs = 0;
 154	if (!lim->max_write_zeroes_sectors)
 155		return NULL;
 156	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
 157		return NULL;
 158	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
 159}
 160
 161/*
 162 * Return the maximum number of sectors from the start of a bio that may be
 163 * submitted as a single request to a block device. If enough sectors remain,
 164 * align the end to the physical block size. Otherwise align the end to the
 165 * logical block size. This approach minimizes the number of non-aligned
 166 * requests that are submitted to a block device if the start of a bio is not
 167 * aligned to a physical block boundary.
 168 */
 169static inline unsigned get_max_io_size(struct bio *bio,
 170				       const struct queue_limits *lim)
 171{
 172	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 173	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
 174	unsigned max_sectors = lim->max_sectors, start, end;
 175
 176	if (lim->chunk_sectors) {
 177		max_sectors = min(max_sectors,
 178			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
 179					       lim->chunk_sectors));
 180	}
 181
 182	start = bio->bi_iter.bi_sector & (pbs - 1);
 183	end = (start + max_sectors) & ~(pbs - 1);
 184	if (end > start)
 185		return end - start;
 186	return max_sectors & ~(lbs - 1);
 187}
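/*
 * Worked example (assumed values): physical block size 4 KiB (pbs == 8
 * sectors), logical block size 512 B (lbs == 1), max_sectors == 1280 and a
 * bio starting at sector 10.  Then start == 10 & 7 == 2 and
 * end == (2 + 1280) & ~7 == 1280, so 1278 sectors are allowed and the
 * request ends at sector 1288, which is physical-block aligned.
 */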
 188
 189/**
 190 * get_max_segment_size() - maximum number of bytes to add as a single segment
 191 * @lim: Request queue limits.
 192 * @start_page: See below.
 193 * @offset: Offset from @start_page where to add a segment.
 194 *
 195 * Returns the maximum number of bytes that can be added as a single segment.
 196 */
 197static inline unsigned get_max_segment_size(const struct queue_limits *lim,
 198		struct page *start_page, unsigned long offset)
 199{
 200	unsigned long mask = lim->seg_boundary_mask;
 201
 202	offset = mask & (page_to_phys(start_page) + offset);
 203
 204	/*
 205	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
 206	 * after having calculated the minimum.
 207	 */
 208	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
 209}
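/*
 * Worked example (assumed values): seg_boundary_mask == 0xffff (64 KiB),
 * max_segment_size == 65536 and a segment starting at physical address
 * 0x1f200.  Then offset == 0xffff & 0x1f200 == 0xf200, and the result is
 * min(0xffff - 0xf200, 65535) + 1 == 0xe00 == 3584 bytes: the segment is
 * clipped at the next 64 KiB boundary (0x20000).
 */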
 210
 211/**
 212 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 213 * @lim:      [in] queue limits to split based on
 214 * @bv:       [in] bvec to examine
 215 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 216 *            by the number of segments from @bv that may be appended to that
 217 *            bio without exceeding @max_segs
 218 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 219 *            by the number of bytes from @bv that may be appended to that
 220 *            bio without exceeding @max_bytes
 221 * @max_segs: [in] upper bound for *@nsegs
 222 * @max_bytes: [in] upper bound for *@bytes
 223 *
 224 * When splitting a bio, it can happen that a bvec is encountered that is too
 225 * big to fit in a single segment and hence that it has to be split in the
 226 * middle. This function verifies whether or not that should happen. The value
 227 * %true is returned if and only if appending the entire @bv to a bio with
  228 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 229 * the block driver.
 230 */
 231static bool bvec_split_segs(const struct queue_limits *lim,
 232		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
 233		unsigned max_segs, unsigned max_bytes)
 234{
 235	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
 236	unsigned len = min(bv->bv_len, max_len);
 237	unsigned total_len = 0;
 238	unsigned seg_size = 0;
 239
 240	while (len && *nsegs < max_segs) {
 241		seg_size = get_max_segment_size(lim, bv->bv_page,
 242						bv->bv_offset + total_len);
 243		seg_size = min(seg_size, len);
 244
 245		(*nsegs)++;
 246		total_len += seg_size;
 247		len -= seg_size;
 248
 249		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
 250			break;
 251	}
 252
 253	*bytes += total_len;
 254
 255	/* tell the caller to split the bvec if it is too big to fit */
 256	return len > 0 || bv->bv_len > max_len;
 257}
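/*
 * Worked example (assumed values): a 20 KiB bvec at offset 0, with
 * max_segment_size == 8 KiB, no virt_boundary_mask and generous
 * max_segs/max_bytes.  The loop above produces three segments of
 * 8 KiB + 8 KiB + 4 KiB, bumps *nsegs by 3 and *bytes by 20 KiB, and
 * returns false because the whole bvec fits.  If max_segs or max_bytes
 * were exhausted first, len would stay non-zero and the function would
 * return true, telling the caller to split.
 */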
 258
 259/**
 260 * bio_split_rw - split a bio in two bios
 261 * @bio:  [in] bio to be split
 262 * @lim:  [in] queue limits to split based on
 263 * @segs: [out] number of segments in the bio with the first half of the sectors
 264 * @bs:	  [in] bio set to allocate the clone from
 265 * @max_bytes: [in] maximum number of bytes per bio
 266 *
 267 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 268 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 269 * following is guaranteed for the cloned bio:
 270 * - That it has at most @max_bytes worth of data
 271 * - That it has at most queue_max_segments(@q) segments.
 272 *
 273 * Except for discard requests the cloned bio will point at the bi_io_vec of
 274 * the original bio. It is the responsibility of the caller to ensure that the
 275 * original bio is not freed before the cloned bio. The caller is also
 276 * responsible for ensuring that @bs is only destroyed after processing of the
 277 * split bio has finished.
 278 */
 279static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 280		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
 281{
 282	struct bio_vec bv, bvprv, *bvprvp = NULL;
 283	struct bvec_iter iter;
 284	unsigned nsegs = 0, bytes = 0;
 285
 286	bio_for_each_bvec(bv, bio, iter) {
 287		/*
 288		 * If the queue doesn't support SG gaps and adding this
 289		 * offset would create a gap, disallow it.
 290		 */
 291		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
 292			goto split;
 293
 294		if (nsegs < lim->max_segments &&
 295		    bytes + bv.bv_len <= max_bytes &&
 296		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 297			nsegs++;
 298			bytes += bv.bv_len;
 299		} else {
 300			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
 301					lim->max_segments, max_bytes))
 302				goto split;
 303		}
 304
 305		bvprv = bv;
 306		bvprvp = &bvprv;
 307	}
 308
 309	*segs = nsegs;
 310	return NULL;
 311split:
 312	/*
 313	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
 314	 * with EAGAIN if splitting is required and return an error pointer.
 315	 */
 316	if (bio->bi_opf & REQ_NOWAIT) {
 317		bio->bi_status = BLK_STS_AGAIN;
 318		bio_endio(bio);
 319		return ERR_PTR(-EAGAIN);
 320	}
 321
 322	*segs = nsegs;
 323
 324	/*
 325	 * Individual bvecs might not be logical block aligned. Round down the
 326	 * split size so that each bio is properly block size aligned, even if
 327	 * we do not use the full hardware limits.
 328	 */
 329	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
 330
 331	/*
 332	 * Bio splitting may cause subtle trouble such as hang when doing sync
  333	 * iopoll in direct IO routine. Since the performance gain of iopoll for
  334	 * big IO can be trivial, disable iopoll when a split is needed.
 335	 */
 336	bio_clear_polled(bio);
 337	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
 338}
 339
 340/**
 341 * __bio_split_to_limits - split a bio to fit the queue limits
 342 * @bio:     bio to be split
 343 * @lim:     queue limits to split based on
 344 * @nr_segs: returns the number of segments in the returned bio
 345 *
 346 * Check if @bio needs splitting based on the queue limits, and if so split off
 347 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 348 * shortened to the remainder and re-submitted.
 349 *
 350 * The split bio is allocated from @q->bio_split, which is provided by the
 351 * block layer.
 352 */
 353struct bio *__bio_split_to_limits(struct bio *bio,
 354				  const struct queue_limits *lim,
 355				  unsigned int *nr_segs)
 356{
 357	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
 358	struct bio *split;
 359
 360	switch (bio_op(bio)) {
 361	case REQ_OP_DISCARD:
 362	case REQ_OP_SECURE_ERASE:
 363		split = bio_split_discard(bio, lim, nr_segs, bs);
 364		break;
 365	case REQ_OP_WRITE_ZEROES:
 366		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
 367		break;
 368	default:
 369		split = bio_split_rw(bio, lim, nr_segs, bs,
 370				get_max_io_size(bio, lim) << SECTOR_SHIFT);
 371		if (IS_ERR(split))
 372			return NULL;
 373		break;
 374	}
 375
 376	if (split) {
  377		/* there is no chance to merge the split bio */
 378		split->bi_opf |= REQ_NOMERGE;
 379
 380		blkcg_bio_issue_init(split);
 381		bio_chain(split, bio);
 382		trace_block_split(split, bio->bi_iter.bi_sector);
 383		submit_bio_noacct(bio);
 384		return split;
 385	}
 386	return bio;
 387}
 388
 389/**
 390 * bio_split_to_limits - split a bio to fit the queue limits
 391 * @bio:     bio to be split
 392 *
 393 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 394 * if so split off a bio fitting the limits from the beginning of @bio and
 395 * return it.  @bio is shortened to the remainder and re-submitted.
 396 *
 397 * The split bio is allocated from @q->bio_split, which is provided by the
 398 * block layer.
 399 */
 400struct bio *bio_split_to_limits(struct bio *bio)
 401{
 402	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
 403	unsigned int nr_segs;
 404
 405	if (bio_may_exceed_limits(bio, lim))
 406		return __bio_split_to_limits(bio, lim, &nr_segs);
 407	return bio;
 408}
 409EXPORT_SYMBOL(bio_split_to_limits);
 410
 411unsigned int blk_recalc_rq_segments(struct request *rq)
 412{
 413	unsigned int nr_phys_segs = 0;
 414	unsigned int bytes = 0;
 415	struct req_iterator iter;
 416	struct bio_vec bv;
 417
 418	if (!rq->bio)
 419		return 0;
 420
 421	switch (bio_op(rq->bio)) {
 422	case REQ_OP_DISCARD:
 423	case REQ_OP_SECURE_ERASE:
 424		if (queue_max_discard_segments(rq->q) > 1) {
 425			struct bio *bio = rq->bio;
 426
 427			for_each_bio(bio)
 428				nr_phys_segs++;
 429			return nr_phys_segs;
 430		}
 431		return 1;
 432	case REQ_OP_WRITE_ZEROES:
 433		return 0;
 434	default:
 435		break;
 436	}
 437
 438	rq_for_each_bvec(bv, rq, iter)
 439		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
 440				UINT_MAX, UINT_MAX);
 441	return nr_phys_segs;
 442}
 443
 444static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 445		struct scatterlist *sglist)
 446{
 447	if (!*sg)
 448		return sglist;
 449
 450	/*
 451	 * If the driver previously mapped a shorter list, we could see a
 452	 * termination bit prematurely unless it fully inits the sg table
 453	 * on each mapping. We KNOW that there must be more entries here
 454	 * or the driver would be buggy, so force clear the termination bit
 455	 * to avoid doing a full sg_init_table() in drivers for each command.
 456	 */
 457	sg_unmark_end(*sg);
 458	return sg_next(*sg);
 459}
 460
 461static unsigned blk_bvec_map_sg(struct request_queue *q,
 462		struct bio_vec *bvec, struct scatterlist *sglist,
 463		struct scatterlist **sg)
 464{
 465	unsigned nbytes = bvec->bv_len;
 466	unsigned nsegs = 0, total = 0;
 467
 468	while (nbytes > 0) {
 469		unsigned offset = bvec->bv_offset + total;
 470		unsigned len = min(get_max_segment_size(&q->limits,
 471				   bvec->bv_page, offset), nbytes);
 472		struct page *page = bvec->bv_page;
 473
 474		/*
 475		 * Unfortunately a fair number of drivers barf on scatterlists
 476		 * that have an offset larger than PAGE_SIZE, despite other
 477		 * subsystems dealing with that invariant just fine.  For now
 478		 * stick to the legacy format where we never present those from
 479		 * the block layer, but the code below should be removed once
 480		 * these offenders (mostly MMC/SD drivers) are fixed.
 481		 */
 482		page += (offset >> PAGE_SHIFT);
 483		offset &= ~PAGE_MASK;
 484
 485		*sg = blk_next_sg(sg, sglist);
 486		sg_set_page(*sg, page, len, offset);
 487
 488		total += len;
 489		nbytes -= len;
 490		nsegs++;
 491	}
 492
 493	return nsegs;
 494}
 495
 496static inline int __blk_bvec_map_sg(struct bio_vec bv,
 497		struct scatterlist *sglist, struct scatterlist **sg)
 498{
 499	*sg = blk_next_sg(sg, sglist);
 500	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
 501	return 1;
 502}
 503
 504/* only try to merge bvecs into one sg if they are from two bios */
 505static inline bool
 506__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
 507			   struct bio_vec *bvprv, struct scatterlist **sg)
 508{
 509
 510	int nbytes = bvec->bv_len;
 511
 512	if (!*sg)
 513		return false;
 514
 515	if ((*sg)->length + nbytes > queue_max_segment_size(q))
 516		return false;
 517
 518	if (!biovec_phys_mergeable(q, bvprv, bvec))
 519		return false;
 520
 521	(*sg)->length += nbytes;
 522
 523	return true;
 524}
 525
 526static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 527			     struct scatterlist *sglist,
 528			     struct scatterlist **sg)
 529{
 530	struct bio_vec bvec, bvprv = { NULL };
 531	struct bvec_iter iter;
 532	int nsegs = 0;
 533	bool new_bio = false;
 534
 535	for_each_bio(bio) {
 536		bio_for_each_bvec(bvec, bio, iter) {
 537			/*
 538			 * Only try to merge bvecs from two bios given we
 539			 * have done bio internal merge when adding pages
 540			 * to bio
 541			 */
 542			if (new_bio &&
 543			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
 544				goto next_bvec;
 545
 546			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
 547				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
 548			else
 549				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 550 next_bvec:
 551			new_bio = false;
 552		}
 553		if (likely(bio->bi_iter.bi_size)) {
 554			bvprv = bvec;
 555			new_bio = true;
 556		}
 557	}
 558
 559	return nsegs;
 560}
 561
 562/*
 563 * map a request to scatterlist, return number of sg entries setup. Caller
 564 * must make sure sg can hold rq->nr_phys_segments entries
 565 */
 566int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 567		struct scatterlist *sglist, struct scatterlist **last_sg)
 568{
 569	int nsegs = 0;
 570
 571	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 572		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
 573	else if (rq->bio)
 574		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 575
 576	if (*last_sg)
 577		sg_mark_end(*last_sg);
 578
 579	/*
  580	 * Something must have gone wrong if the computed number of
  581	 * segments is bigger than the number of the request's physical segments
 582	 */
 583	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
 584
 585	return nsegs;
 586}
 587EXPORT_SYMBOL(__blk_rq_map_sg);
 588
 589static inline unsigned int blk_rq_get_max_segments(struct request *rq)
 590{
 591	if (req_op(rq) == REQ_OP_DISCARD)
 592		return queue_max_discard_segments(rq->q);
 593	return queue_max_segments(rq->q);
 594}
 595
 596static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 597						  sector_t offset)
 598{
 599	struct request_queue *q = rq->q;
 600	unsigned int max_sectors;
 601
 602	if (blk_rq_is_passthrough(rq))
 603		return q->limits.max_hw_sectors;
 604
 605	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
 606	if (!q->limits.chunk_sectors ||
 607	    req_op(rq) == REQ_OP_DISCARD ||
 608	    req_op(rq) == REQ_OP_SECURE_ERASE)
 609		return max_sectors;
 610	return min(max_sectors,
 611		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
 612}
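/*
 * Worked example (assumed values): chunk_sectors == 256 and a request at
 * offset == 1000.  blk_chunk_sectors_left() yields 256 - (1000 & 255) == 24,
 * so the request may grow by at most 24 sectors before it would cross the
 * chunk boundary at sector 1024 (assuming max_sectors is larger than that).
 */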
 613
 614static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 615		unsigned int nr_phys_segs)
 616{
 617	if (!blk_cgroup_mergeable(req, bio))
 618		goto no_merge;
 619
 620	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 621		goto no_merge;
 622
 623	/* discard request merge won't add new segment */
 624	if (req_op(req) == REQ_OP_DISCARD)
 625		return 1;
 626
 627	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
 628		goto no_merge;
 629
 630	/*
 631	 * This will form the start of a new hw segment.  Bump both
 632	 * counters.
 633	 */
 634	req->nr_phys_segments += nr_phys_segs;
 635	return 1;
 636
 637no_merge:
 638	req_set_nomerge(req->q, req);
 639	return 0;
 640}
 641
 642int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 643{
 644	if (req_gap_back_merge(req, bio))
 645		return 0;
 646	if (blk_integrity_rq(req) &&
 647	    integrity_req_gap_back_merge(req, bio))
 648		return 0;
 649	if (!bio_crypt_ctx_back_mergeable(req, bio))
 650		return 0;
 651	if (blk_rq_sectors(req) + bio_sectors(bio) >
 652	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
 653		req_set_nomerge(req->q, req);
 654		return 0;
 655	}
 656
 657	return ll_new_hw_segment(req, bio, nr_segs);
 658}
 659
 660static int ll_front_merge_fn(struct request *req, struct bio *bio,
 661		unsigned int nr_segs)
 662{
 663	if (req_gap_front_merge(req, bio))
 664		return 0;
 665	if (blk_integrity_rq(req) &&
 666	    integrity_req_gap_front_merge(req, bio))
 667		return 0;
 668	if (!bio_crypt_ctx_front_mergeable(req, bio))
 669		return 0;
 670	if (blk_rq_sectors(req) + bio_sectors(bio) >
 671	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
 672		req_set_nomerge(req->q, req);
 673		return 0;
 674	}
 675
 676	return ll_new_hw_segment(req, bio, nr_segs);
 677}
 678
 679static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
 680		struct request *next)
 681{
 682	unsigned short segments = blk_rq_nr_discard_segments(req);
 683
 684	if (segments >= queue_max_discard_segments(q))
 685		goto no_merge;
 686	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
 687	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 688		goto no_merge;
 689
 690	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
 691	return true;
 692no_merge:
 693	req_set_nomerge(q, req);
 694	return false;
 695}
 696
 697static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 698				struct request *next)
 699{
 700	int total_phys_segments;
 701
 702	if (req_gap_back_merge(req, next->bio))
 703		return 0;
 704
 705	/*
 706	 * Will it become too large?
 707	 */
 708	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
 709	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 710		return 0;
 711
 712	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
 713	if (total_phys_segments > blk_rq_get_max_segments(req))
 714		return 0;
 715
 716	if (!blk_cgroup_mergeable(req, next->bio))
 717		return 0;
 718
 719	if (blk_integrity_merge_rq(q, req, next) == false)
 720		return 0;
 721
 722	if (!bio_crypt_ctx_merge_rq(req, next))
 723		return 0;
 724
 725	/* Merge is OK... */
 726	req->nr_phys_segments = total_phys_segments;
 727	return 1;
 728}
 729
 730/**
 731 * blk_rq_set_mixed_merge - mark a request as mixed merge
 732 * @rq: request to mark as mixed merge
 733 *
 734 * Description:
 735 *     @rq is about to be mixed merged.  Make sure the attributes
 736 *     which can be mixed are set in each bio and mark @rq as mixed
 737 *     merged.
 738 */
 739void blk_rq_set_mixed_merge(struct request *rq)
 740{
 741	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 742	struct bio *bio;
 743
 744	if (rq->rq_flags & RQF_MIXED_MERGE)
 745		return;
 746
 747	/*
 748	 * @rq will no longer represent mixable attributes for all the
 749	 * contained bios.  It will just track those of the first one.
  750	 * Distribute the attributes to each bio.
 751	 */
 752	for (bio = rq->bio; bio; bio = bio->bi_next) {
 753		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
 754			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
 755		bio->bi_opf |= ff;
 756	}
 757	rq->rq_flags |= RQF_MIXED_MERGE;
 758}
 759
 760static void blk_account_io_merge_request(struct request *req)
 761{
 762	if (blk_do_io_stat(req)) {
 763		part_stat_lock();
 764		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 765		part_stat_unlock();
 766	}
 767}
 768
 769static enum elv_merge blk_try_req_merge(struct request *req,
 770					struct request *next)
 771{
 772	if (blk_discard_mergable(req))
 773		return ELEVATOR_DISCARD_MERGE;
 774	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
 775		return ELEVATOR_BACK_MERGE;
 776
 777	return ELEVATOR_NO_MERGE;
 778}
 779
 780/*
 781 * For non-mq, this has to be called with the request spinlock acquired.
 782 * For mq with scheduling, the appropriate queue wide lock should be held.
 783 */
 784static struct request *attempt_merge(struct request_queue *q,
 785				     struct request *req, struct request *next)
 786{
 787	if (!rq_mergeable(req) || !rq_mergeable(next))
 788		return NULL;
 789
 790	if (req_op(req) != req_op(next))
 791		return NULL;
 792
 793	if (rq_data_dir(req) != rq_data_dir(next))
 794		return NULL;
 795
 796	if (req->ioprio != next->ioprio)
 797		return NULL;
 798
 799	/*
 800	 * If we are allowed to merge, then append bio list
 801	 * from next to rq and release next. merge_requests_fn
 802	 * will have updated segment counts, update sector
 803	 * counts here. Handle DISCARDs separately, as they
 804	 * have separate settings.
 805	 */
 806
 807	switch (blk_try_req_merge(req, next)) {
 808	case ELEVATOR_DISCARD_MERGE:
 809		if (!req_attempt_discard_merge(q, req, next))
 810			return NULL;
 811		break;
 812	case ELEVATOR_BACK_MERGE:
 813		if (!ll_merge_requests_fn(q, req, next))
 814			return NULL;
 815		break;
 816	default:
 817		return NULL;
 818	}
 819
 820	/*
 821	 * If failfast settings disagree or any of the two is already
 822	 * a mixed merge, mark both as mixed before proceeding.  This
 823	 * makes sure that all involved bios have mixable attributes
 824	 * set properly.
 825	 */
 826	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
 827	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
 828	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
 829		blk_rq_set_mixed_merge(req);
 830		blk_rq_set_mixed_merge(next);
 831	}
 832
 833	/*
 834	 * At this point we have either done a back merge or front merge. We
 835	 * need the smaller start_time_ns of the merged requests to be the
 836	 * current request for accounting purposes.
 837	 */
 838	if (next->start_time_ns < req->start_time_ns)
 839		req->start_time_ns = next->start_time_ns;
 840
 841	req->biotail->bi_next = next->bio;
 842	req->biotail = next->biotail;
 843
 844	req->__data_len += blk_rq_bytes(next);
 845
 846	if (!blk_discard_mergable(req))
 847		elv_merge_requests(q, req, next);
 848
 849	/*
 850	 * 'next' is going away, so update stats accordingly
 851	 */
 852	blk_account_io_merge_request(next);
 853
 854	trace_block_rq_merge(next);
 855
 856	/*
 857	 * ownership of bio passed from next to req, return 'next' for
 858	 * the caller to free
 859	 */
 860	next->bio = NULL;
 861	return next;
 862}
 863
 864static struct request *attempt_back_merge(struct request_queue *q,
 865		struct request *rq)
 866{
 867	struct request *next = elv_latter_request(q, rq);
 868
 869	if (next)
 870		return attempt_merge(q, rq, next);
 871
 872	return NULL;
 873}
 874
 875static struct request *attempt_front_merge(struct request_queue *q,
 876		struct request *rq)
 877{
 878	struct request *prev = elv_former_request(q, rq);
 879
 880	if (prev)
 881		return attempt_merge(q, prev, rq);
 882
 883	return NULL;
 884}
 885
 886/*
 887 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 888 * otherwise. The caller is responsible for freeing 'next' if the merge
 889 * happened.
 890 */
 891bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 892			   struct request *next)
 893{
 894	return attempt_merge(q, rq, next);
 895}
 896
 897bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 898{
 899	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 900		return false;
 901
 902	if (req_op(rq) != bio_op(bio))
 903		return false;
 904
 905	/* different data direction or already started, don't merge */
 906	if (bio_data_dir(bio) != rq_data_dir(rq))
 907		return false;
 908
 909	/* don't merge across cgroup boundaries */
 910	if (!blk_cgroup_mergeable(rq, bio))
 911		return false;
 912
 913	/* only merge integrity protected bio into ditto rq */
 914	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 915		return false;
 916
 917	/* Only merge if the crypt contexts are compatible */
 918	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 919		return false;
 920
 921	if (rq->ioprio != bio_prio(bio))
 922		return false;
 923
 924	return true;
 925}
 926
 927enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 928{
 929	if (blk_discard_mergable(rq))
 930		return ELEVATOR_DISCARD_MERGE;
 931	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 932		return ELEVATOR_BACK_MERGE;
 933	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 934		return ELEVATOR_FRONT_MERGE;
 935	return ELEVATOR_NO_MERGE;
 936}
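/*
 * Worked example (assumed values): a request covering sectors 100..107
 * (blk_rq_pos == 100, blk_rq_sectors == 8).  A bio starting at sector 108
 * satisfies 100 + 8 == 108 and is a back merge candidate; an 8-sector bio
 * starting at sector 92 satisfies 100 - 8 == 92 and is a front merge
 * candidate.  Anything else falls through to ELEVATOR_NO_MERGE.
 */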
 937
 938static void blk_account_io_merge_bio(struct request *req)
 939{
 940	if (!blk_do_io_stat(req))
 941		return;
 942
 943	part_stat_lock();
 944	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 945	part_stat_unlock();
 946}
 947
 948enum bio_merge_status {
 949	BIO_MERGE_OK,
 950	BIO_MERGE_NONE,
 951	BIO_MERGE_FAILED,
 952};
 953
 954static enum bio_merge_status bio_attempt_back_merge(struct request *req,
 955		struct bio *bio, unsigned int nr_segs)
 956{
 957	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
 958
 959	if (!ll_back_merge_fn(req, bio, nr_segs))
 960		return BIO_MERGE_FAILED;
 961
 962	trace_block_bio_backmerge(bio);
 963	rq_qos_merge(req->q, req, bio);
 964
 965	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 966		blk_rq_set_mixed_merge(req);
 967
 968	req->biotail->bi_next = bio;
 969	req->biotail = bio;
 970	req->__data_len += bio->bi_iter.bi_size;
 971
 972	bio_crypt_free_ctx(bio);
 973
 974	blk_account_io_merge_bio(req);
 975	return BIO_MERGE_OK;
 976}
 977
 978static enum bio_merge_status bio_attempt_front_merge(struct request *req,
 979		struct bio *bio, unsigned int nr_segs)
 980{
 981	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
 982
 983	if (!ll_front_merge_fn(req, bio, nr_segs))
 984		return BIO_MERGE_FAILED;
 985
 986	trace_block_bio_frontmerge(bio);
 987	rq_qos_merge(req->q, req, bio);
 988
 989	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 990		blk_rq_set_mixed_merge(req);
 991
 992	bio->bi_next = req->bio;
 993	req->bio = bio;
 994
 995	req->__sector = bio->bi_iter.bi_sector;
 996	req->__data_len += bio->bi_iter.bi_size;
 997
 998	bio_crypt_do_front_merge(req, bio);
 999
1000	blk_account_io_merge_bio(req);
1001	return BIO_MERGE_OK;
1002}
1003
1004static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1005		struct request *req, struct bio *bio)
1006{
1007	unsigned short segments = blk_rq_nr_discard_segments(req);
1008
1009	if (segments >= queue_max_discard_segments(q))
1010		goto no_merge;
1011	if (blk_rq_sectors(req) + bio_sectors(bio) >
1012	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1013		goto no_merge;
1014
1015	rq_qos_merge(q, req, bio);
1016
1017	req->biotail->bi_next = bio;
1018	req->biotail = bio;
1019	req->__data_len += bio->bi_iter.bi_size;
1020	req->nr_phys_segments = segments + 1;
1021
1022	blk_account_io_merge_bio(req);
1023	return BIO_MERGE_OK;
1024no_merge:
1025	req_set_nomerge(q, req);
1026	return BIO_MERGE_FAILED;
1027}
1028
1029static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1030						   struct request *rq,
1031						   struct bio *bio,
1032						   unsigned int nr_segs,
1033						   bool sched_allow_merge)
1034{
1035	if (!blk_rq_merge_ok(rq, bio))
1036		return BIO_MERGE_NONE;
1037
1038	switch (blk_try_merge(rq, bio)) {
1039	case ELEVATOR_BACK_MERGE:
1040		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1041			return bio_attempt_back_merge(rq, bio, nr_segs);
1042		break;
1043	case ELEVATOR_FRONT_MERGE:
1044		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1045			return bio_attempt_front_merge(rq, bio, nr_segs);
1046		break;
1047	case ELEVATOR_DISCARD_MERGE:
1048		return bio_attempt_discard_merge(q, rq, bio);
1049	default:
1050		return BIO_MERGE_NONE;
1051	}
1052
1053	return BIO_MERGE_FAILED;
1054}
1055
1056/**
1057 * blk_attempt_plug_merge - try to merge with %current's plugged list
1058 * @q: request_queue new bio is being queued at
1059 * @bio: new bio being queued
1060 * @nr_segs: number of segments in @bio
1061 * from the passed in @q already in the plug list
1062 *
1063 * Determine whether @bio being queued on @q can be merged with the previous
1064 * request on %current's plugged list.  Returns %true if merge was successful,
1065 * otherwise %false.
1066 *
1067 * Plugging coalesces IOs from the same issuer for the same purpose without
1068 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 1069 * than scheduling, and the request, while it may have elvpriv data, is not
1070 * added on the elevator at this point.  In addition, we don't have
1071 * reliable access to the elevator outside queue lock.  Only check basic
1072 * merging parameters without querying the elevator.
1073 *
1074 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1075 */
1076bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1077		unsigned int nr_segs)
1078{
1079	struct blk_plug *plug;
1080	struct request *rq;
1081
1082	plug = blk_mq_plug(bio);
1083	if (!plug || rq_list_empty(plug->mq_list))
1084		return false;
1085
1086	rq_list_for_each(&plug->mq_list, rq) {
1087		if (rq->q == q) {
1088			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1089			    BIO_MERGE_OK)
1090				return true;
1091			break;
1092		}
1093
1094		/*
1095		 * Only keep iterating plug list for merges if we have multiple
1096		 * queues
1097		 */
1098		if (!plug->multiple_queues)
1099			break;
1100	}
1101	return false;
1102}
1103
1104/*
1105 * Iterate list of requests and see if we can merge this bio with any
1106 * of them.
1107 */
1108bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1109			struct bio *bio, unsigned int nr_segs)
1110{
1111	struct request *rq;
1112	int checked = 8;
1113
1114	list_for_each_entry_reverse(rq, list, queuelist) {
1115		if (!checked--)
1116			break;
1117
1118		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1119		case BIO_MERGE_NONE:
1120			continue;
1121		case BIO_MERGE_OK:
1122			return true;
1123		case BIO_MERGE_FAILED:
1124			return false;
1125		}
1126
1127	}
1128
1129	return false;
1130}
1131EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1132
1133bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1134		unsigned int nr_segs, struct request **merged_request)
1135{
1136	struct request *rq;
1137
1138	switch (elv_merge(q, &rq, bio)) {
1139	case ELEVATOR_BACK_MERGE:
1140		if (!blk_mq_sched_allow_merge(q, rq, bio))
1141			return false;
1142		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1143			return false;
1144		*merged_request = attempt_back_merge(q, rq);
1145		if (!*merged_request)
1146			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1147		return true;
1148	case ELEVATOR_FRONT_MERGE:
1149		if (!blk_mq_sched_allow_merge(q, rq, bio))
1150			return false;
1151		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1152			return false;
1153		*merged_request = attempt_front_merge(q, rq);
1154		if (!*merged_request)
1155			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1156		return true;
1157	case ELEVATOR_DISCARD_MERGE:
1158		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1159	default:
1160		return false;
1161	}
1162}
1163EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions related to segment and merge handling
   4 */
   5#include <linux/kernel.h>
   6#include <linux/module.h>
   7#include <linux/bio.h>
   8#include <linux/blkdev.h>
   9#include <linux/blk-integrity.h>
  10#include <linux/scatterlist.h>
  11#include <linux/part_stat.h>
  12#include <linux/blk-cgroup.h>
  13
  14#include <trace/events/block.h>
  15
  16#include "blk.h"
  17#include "blk-mq-sched.h"
  18#include "blk-rq-qos.h"
  19#include "blk-throttle.h"
  20
  21static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
  22{
  23	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
  24}
  25
  26static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
  27{
  28	struct bvec_iter iter = bio->bi_iter;
  29	int idx;
  30
  31	bio_get_first_bvec(bio, bv);
  32	if (bv->bv_len == bio->bi_iter.bi_size)
  33		return;		/* this bio only has a single bvec */
  34
  35	bio_advance_iter(bio, &iter, iter.bi_size);
  36
  37	if (!iter.bi_bvec_done)
  38		idx = iter.bi_idx - 1;
  39	else	/* in the middle of bvec */
  40		idx = iter.bi_idx;
  41
  42	*bv = bio->bi_io_vec[idx];
  43
  44	/*
  45	 * iter.bi_bvec_done records actual length of the last bvec
  46	 * if this bio ends in the middle of one io vector
  47	 */
  48	if (iter.bi_bvec_done)
  49		bv->bv_len = iter.bi_bvec_done;
  50}
  51
  52static inline bool bio_will_gap(struct request_queue *q,
  53		struct request *prev_rq, struct bio *prev, struct bio *next)
  54{
  55	struct bio_vec pb, nb;
  56
  57	if (!bio_has_data(prev) || !queue_virt_boundary(q))
  58		return false;
  59
  60	/*
  61	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
  62	 * is quite difficult to respect the sg gap limit.  We work hard to
  63	 * merge a huge number of small single bios in case of mkfs.
  64	 */
  65	if (prev_rq)
  66		bio_get_first_bvec(prev_rq->bio, &pb);
  67	else
  68		bio_get_first_bvec(prev, &pb);
  69	if (pb.bv_offset & queue_virt_boundary(q))
  70		return true;
  71
  72	/*
  73	 * We don't need to worry about the situation that the merged segment
  74	 * ends in unaligned virt boundary:
  75	 *
  76	 * - if 'pb' ends aligned, the merged segment ends aligned
  77	 * - if 'pb' ends unaligned, the next bio must include
  78	 *   one single bvec of 'nb', otherwise the 'nb' can't
  79	 *   merge with 'pb'
  80	 */
  81	bio_get_last_bvec(prev, &pb);
  82	bio_get_first_bvec(next, &nb);
  83	if (biovec_phys_mergeable(q, &pb, &nb))
  84		return false;
  85	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
  86}
  87
  88static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
  89{
  90	return bio_will_gap(req->q, req, req->biotail, bio);
  91}
  92
  93static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  94{
  95	return bio_will_gap(req->q, NULL, bio, req->bio);
  96}
  97
  98/*
   99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
  100 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
  101 * logical block size, which is the minimum unit accepted by the hardware.
 102 */
 103static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
 104{
 105	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
 106}
 107
 108static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
 109{
 110	if (unlikely(split_sectors < 0))
 111		goto error;
 112
 113	if (split_sectors) {
 114		struct bio *split;
 115
 116		split = bio_split(bio, split_sectors, GFP_NOIO,
 117				&bio->bi_bdev->bd_disk->bio_split);
 118		if (IS_ERR(split)) {
 119			split_sectors = PTR_ERR(split);
 120			goto error;
 121		}
 122		split->bi_opf |= REQ_NOMERGE;
 123		blkcg_bio_issue_init(split);
 124		bio_chain(split, bio);
 125		trace_block_split(split, bio->bi_iter.bi_sector);
 126		WARN_ON_ONCE(bio_zone_write_plugging(bio));
 127		submit_bio_noacct(bio);
 128		return split;
 129	}
 130
 131	return bio;
 132error:
 133	bio->bi_status = errno_to_blk_status(split_sectors);
 134	bio_endio(bio);
 135	return NULL;
 136}
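/*
 * Worked example: when bio_split_rw_at() below reports -EAGAIN for a
 * REQ_NOWAIT bio that would need splitting, the error path above ends the
 * whole bio with errno_to_blk_status(-EAGAIN) == BLK_STS_AGAIN and returns
 * NULL; a return of 0 hands the bio back unsplit, and a positive sector
 * count splits off the front and re-submits the shortened remainder.
 */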
 137
 138struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
 139		unsigned *nsegs)
 140{
 141	unsigned int max_discard_sectors, granularity;
 142	sector_t tmp;
 143	unsigned split_sectors;
 144
 145	*nsegs = 1;
 146
 147	granularity = max(lim->discard_granularity >> 9, 1U);
 148
 149	max_discard_sectors =
 150		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
 151	max_discard_sectors -= max_discard_sectors % granularity;
 152	if (unlikely(!max_discard_sectors))
 153		return bio;
 154
 155	if (bio_sectors(bio) <= max_discard_sectors)
 156		return bio;
 157
 158	split_sectors = max_discard_sectors;
 159
 160	/*
 161	 * If the next starting sector would be misaligned, stop the discard at
 162	 * the previous aligned sector.
 163	 */
 164	tmp = bio->bi_iter.bi_sector + split_sectors -
 165		((lim->discard_alignment >> 9) % granularity);
 166	tmp = sector_div(tmp, granularity);
 167
 168	if (split_sectors > tmp)
 169		split_sectors -= tmp;
 170
 171	return bio_submit_split(bio, split_sectors);
 172}
 173
 174static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
 175						bool is_atomic)
 176{
 177	/*
 178	 * chunk_sectors must be a multiple of atomic_write_boundary_sectors if
 179	 * both non-zero.
 180	 */
 181	if (is_atomic && lim->atomic_write_boundary_sectors)
 182		return lim->atomic_write_boundary_sectors;
 183
 184	return lim->chunk_sectors;
 185}
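/*
 * Worked example (assumed values): with atomic_write_boundary_sectors == 64
 * and chunk_sectors == 256, an atomic write is limited by the 64-sector
 * boundary while other I/O keeps using the 256-sector chunk boundary; the
 * multiple-of requirement noted above holds since 256 == 4 * 64.
 */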
 186
 187/*
 188 * Return the maximum number of sectors from the start of a bio that may be
 189 * submitted as a single request to a block device. If enough sectors remain,
 190 * align the end to the physical block size. Otherwise align the end to the
 191 * logical block size. This approach minimizes the number of non-aligned
 192 * requests that are submitted to a block device if the start of a bio is not
 193 * aligned to a physical block boundary.
 194 */
 195static inline unsigned get_max_io_size(struct bio *bio,
 196				       const struct queue_limits *lim)
 197{
 198	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 199	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
 200	bool is_atomic = bio->bi_opf & REQ_ATOMIC;
 201	unsigned boundary_sectors = blk_boundary_sectors(lim, is_atomic);
 202	unsigned max_sectors, start, end;
 203
 204	/*
  205	 * We ignore lim->max_sectors for atomic writes because it may be less
 206	 * than the actual bio size, which we cannot tolerate.
 207	 */
 208	if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
 209		max_sectors = lim->max_write_zeroes_sectors;
 210	else if (is_atomic)
 211		max_sectors = lim->atomic_write_max_sectors;
 212	else
 213		max_sectors = lim->max_sectors;
 214
 215	if (boundary_sectors) {
 216		max_sectors = min(max_sectors,
 217			blk_boundary_sectors_left(bio->bi_iter.bi_sector,
 218					      boundary_sectors));
 219	}
 220
 221	start = bio->bi_iter.bi_sector & (pbs - 1);
 222	end = (start + max_sectors) & ~(pbs - 1);
 223	if (end > start)
 224		return end - start;
 225	return max_sectors & ~(lbs - 1);
 226}
 227
 228/**
 229 * get_max_segment_size() - maximum number of bytes to add as a single segment
 230 * @lim: Request queue limits.
 231 * @paddr: address of the range to add
 232 * @len: maximum length available to add at @paddr
 233 *
 234 * Returns the maximum number of bytes of the range starting at @paddr that can
 235 * be added to a single segment.
 236 */
 237static inline unsigned get_max_segment_size(const struct queue_limits *lim,
 238		phys_addr_t paddr, unsigned int len)
 239{
 240	/*
 241	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
 242	 * after having calculated the minimum.
 243	 */
 244	return min_t(unsigned long, len,
 245		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
 246		    (unsigned long)lim->max_segment_size - 1) + 1);
 247}
 248
 249/**
 250 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 251 * @lim:      [in] queue limits to split based on
 252 * @bv:       [in] bvec to examine
 253 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 254 *            by the number of segments from @bv that may be appended to that
 255 *            bio without exceeding @max_segs
 256 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 257 *            by the number of bytes from @bv that may be appended to that
 258 *            bio without exceeding @max_bytes
 259 * @max_segs: [in] upper bound for *@nsegs
 260 * @max_bytes: [in] upper bound for *@bytes
 261 *
 262 * When splitting a bio, it can happen that a bvec is encountered that is too
 263 * big to fit in a single segment and hence that it has to be split in the
 264 * middle. This function verifies whether or not that should happen. The value
 265 * %true is returned if and only if appending the entire @bv to a bio with
  266 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 267 * the block driver.
 268 */
 269static bool bvec_split_segs(const struct queue_limits *lim,
 270		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
 271		unsigned max_segs, unsigned max_bytes)
 272{
 273	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
 274	unsigned len = min(bv->bv_len, max_len);
 275	unsigned total_len = 0;
 276	unsigned seg_size = 0;
 277
 278	while (len && *nsegs < max_segs) {
 279		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);
 280
 281		(*nsegs)++;
 282		total_len += seg_size;
 283		len -= seg_size;
 284
 285		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
 286			break;
 287	}
 288
 289	*bytes += total_len;
 290
 291	/* tell the caller to split the bvec if it is too big to fit */
 292	return len > 0 || bv->bv_len > max_len;
 293}
 294
 295static unsigned int bio_split_alignment(struct bio *bio,
 296		const struct queue_limits *lim)
 297{
 298	if (op_is_write(bio_op(bio)) && lim->zone_write_granularity)
 299		return lim->zone_write_granularity;
 300	return lim->logical_block_size;
 301}
 302
 303/**
 304 * bio_split_rw_at - check if and where to split a read/write bio
 305 * @bio:  [in] bio to be split
 306 * @lim:  [in] queue limits to split based on
 307 * @segs: [out] number of segments in the bio with the first half of the sectors
 308 * @max_bytes: [in] maximum number of bytes per bio
 309 *
 310 * Find out if @bio needs to be split to fit the queue limits in @lim and a
 311 * maximum size of @max_bytes.  Returns a negative error number if @bio can't be
 312 * split, 0 if the bio doesn't have to be split, or a positive sector offset if
 313 * @bio needs to be split.
 314 */
 315int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
 316		unsigned *segs, unsigned max_bytes)
 317{
 318	struct bio_vec bv, bvprv, *bvprvp = NULL;
 319	struct bvec_iter iter;
 320	unsigned nsegs = 0, bytes = 0;
 321
 322	bio_for_each_bvec(bv, bio, iter) {
 323		/*
 324		 * If the queue doesn't support SG gaps and adding this
 325		 * offset would create a gap, disallow it.
 326		 */
 327		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
 328			goto split;
 329
 330		if (nsegs < lim->max_segments &&
 331		    bytes + bv.bv_len <= max_bytes &&
 332		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 333			nsegs++;
 334			bytes += bv.bv_len;
 335		} else {
 336			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
 337					lim->max_segments, max_bytes))
 338				goto split;
 339		}
 340
 341		bvprv = bv;
 342		bvprvp = &bvprv;
 343	}
 344
 345	*segs = nsegs;
 346	return 0;
 347split:
 348	if (bio->bi_opf & REQ_ATOMIC)
 349		return -EINVAL;
 350
 351	/*
 352	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
 353	 * with EAGAIN if splitting is required and return an error pointer.
 354	 */
 355	if (bio->bi_opf & REQ_NOWAIT)
 356		return -EAGAIN;
 357
 358	*segs = nsegs;
 359
 360	/*
 361	 * Individual bvecs might not be logical block aligned. Round down the
 362	 * split size so that each bio is properly block size aligned, even if
 363	 * we do not use the full hardware limits.
 364	 */
 365	bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));
 366
 367	/*
 368	 * Bio splitting may cause subtle trouble such as hang when doing sync
  369	 * iopoll in direct IO routine. Since the performance gain of iopoll for
  370	 * big IO can be trivial, disable iopoll when a split is needed.
 371	 */
 372	bio_clear_polled(bio);
 373	return bytes >> SECTOR_SHIFT;
 374}
 375EXPORT_SYMBOL_GPL(bio_split_rw_at);
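/*
 * Worked example (assumed values): a 1 MiB read built from well-aligned
 * bvecs on a queue whose limits allow max_bytes == 512 KiB would return
 * 1024 from bio_split_rw_at(), i.e. the caller should split the first
 * 512 KiB (1024 sectors) off and re-submit the rest; the same bio within
 * the limits would return 0.
 */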
 376
 377struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 378		unsigned *nr_segs)
 379{
 380	return bio_submit_split(bio,
 381		bio_split_rw_at(bio, lim, nr_segs,
 382			get_max_io_size(bio, lim) << SECTOR_SHIFT));
 383}
 384
 385/*
 386 * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
 387 *
 388 * But we want the nr_segs calculation provided by bio_split_rw_at, and having
 389 * a good sanity check that the submitter built the bio correctly is nice to
 390 * have as well.
 391 */
 392struct bio *bio_split_zone_append(struct bio *bio,
 393		const struct queue_limits *lim, unsigned *nr_segs)
 394{
 395	int split_sectors;
 396
 397	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
 398			lim->max_zone_append_sectors << SECTOR_SHIFT);
 399	if (WARN_ON_ONCE(split_sectors > 0))
 400		split_sectors = -EINVAL;
 401	return bio_submit_split(bio, split_sectors);
 402}
 403
 404struct bio *bio_split_write_zeroes(struct bio *bio,
 405		const struct queue_limits *lim, unsigned *nsegs)
 406{
 407	unsigned int max_sectors = get_max_io_size(bio, lim);
 408
 409	*nsegs = 0;
 410
 411	/*
 412	 * An unset limit should normally not happen, as bio submission is keyed
 413	 * off having a non-zero limit.  But SCSI can clear the limit in the
 414	 * I/O completion handler, and we can race and see this.  Splitting to a
 415	 * zero limit obviously doesn't make sense, so band-aid it here.
 416	 */
 417	if (!max_sectors)
 418		return bio;
 419	if (bio_sectors(bio) <= max_sectors)
 420		return bio;
 421	return bio_submit_split(bio, max_sectors);
 422}
 423
 424/**
 425 * bio_split_to_limits - split a bio to fit the queue limits
 426 * @bio:     bio to be split
 427 *
 428 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 429 * if so split off a bio fitting the limits from the beginning of @bio and
 430 * return it.  @bio is shortened to the remainder and re-submitted.
 431 *
 432 * The split bio is allocated from @q->bio_split, which is provided by the
 433 * block layer.
 434 */
 435struct bio *bio_split_to_limits(struct bio *bio)
 436{
 437	unsigned int nr_segs;
 438
 439	return __bio_split_to_limits(bio, bdev_limits(bio->bi_bdev), &nr_segs);
 440}
 441EXPORT_SYMBOL(bio_split_to_limits);
 442
 443unsigned int blk_recalc_rq_segments(struct request *rq)
 444{
 445	unsigned int nr_phys_segs = 0;
 446	unsigned int bytes = 0;
 447	struct req_iterator iter;
 448	struct bio_vec bv;
 449
 450	if (!rq->bio)
 451		return 0;
 452
 453	switch (bio_op(rq->bio)) {
 454	case REQ_OP_DISCARD:
 455	case REQ_OP_SECURE_ERASE:
 456		if (queue_max_discard_segments(rq->q) > 1) {
 457			struct bio *bio = rq->bio;
 458
 459			for_each_bio(bio)
 460				nr_phys_segs++;
 461			return nr_phys_segs;
 462		}
 463		return 1;
 464	case REQ_OP_WRITE_ZEROES:
 465		return 0;
 466	default:
 467		break;
 468	}
 469
 470	rq_for_each_bvec(bv, rq, iter)
 471		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
 472				UINT_MAX, UINT_MAX);
 473	return nr_phys_segs;
 474}
 475
 476static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 477		struct scatterlist *sglist)
 478{
 479	if (!*sg)
 480		return sglist;
 481
 482	/*
 483	 * If the driver previously mapped a shorter list, we could see a
 484	 * termination bit prematurely unless it fully inits the sg table
 485	 * on each mapping. We KNOW that there must be more entries here
 486	 * or the driver would be buggy, so force clear the termination bit
 487	 * to avoid doing a full sg_init_table() in drivers for each command.
 488	 */
 489	sg_unmark_end(*sg);
 490	return sg_next(*sg);
 491}
 492
 493static unsigned blk_bvec_map_sg(struct request_queue *q,
 494		struct bio_vec *bvec, struct scatterlist *sglist,
 495		struct scatterlist **sg)
 496{
 497	unsigned nbytes = bvec->bv_len;
 498	unsigned nsegs = 0, total = 0;
 499
 500	while (nbytes > 0) {
 501		unsigned offset = bvec->bv_offset + total;
 502		unsigned len = get_max_segment_size(&q->limits,
 503				bvec_phys(bvec) + total, nbytes);
 504		struct page *page = bvec->bv_page;
 505
 506		/*
 507		 * Unfortunately a fair number of drivers barf on scatterlists
 508		 * that have an offset larger than PAGE_SIZE, despite other
 509		 * subsystems dealing with that invariant just fine.  For now
 510		 * stick to the legacy format where we never present those from
 511		 * the block layer, but the code below should be removed once
 512		 * these offenders (mostly MMC/SD drivers) are fixed.
 513		 */
 514		page += (offset >> PAGE_SHIFT);
 515		offset &= ~PAGE_MASK;
 516
 517		*sg = blk_next_sg(sg, sglist);
 518		sg_set_page(*sg, page, len, offset);
 519
 520		total += len;
 521		nbytes -= len;
 522		nsegs++;
 523	}
 524
 525	return nsegs;
 526}
 527
 528static inline int __blk_bvec_map_sg(struct bio_vec bv,
 529		struct scatterlist *sglist, struct scatterlist **sg)
 530{
 531	*sg = blk_next_sg(sg, sglist);
 532	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
 533	return 1;
 534}
 535
 536/* only try to merge bvecs into one sg if they are from two bios */
 537static inline bool
 538__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
 539			   struct bio_vec *bvprv, struct scatterlist **sg)
 540{
 541
 542	int nbytes = bvec->bv_len;
 543
 544	if (!*sg)
 545		return false;
 546
 547	if ((*sg)->length + nbytes > queue_max_segment_size(q))
 548		return false;
 549
 550	if (!biovec_phys_mergeable(q, bvprv, bvec))
 551		return false;
 552
 553	(*sg)->length += nbytes;
 554
 555	return true;
 556}
 557
 558static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 559			     struct scatterlist *sglist,
 560			     struct scatterlist **sg)
 561{
 562	struct bio_vec bvec, bvprv = { NULL };
 563	struct bvec_iter iter;
 564	int nsegs = 0;
 565	bool new_bio = false;
 566
 567	for_each_bio(bio) {
 568		bio_for_each_bvec(bvec, bio, iter) {
 569			/*
 570			 * Only try to merge bvecs from two bios given we
 571			 * have done bio internal merge when adding pages
 572			 * to bio
 573			 */
 574			if (new_bio &&
 575			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
 576				goto next_bvec;
 577
 578			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
 579				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
 580			else
 581				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 582 next_bvec:
 583			new_bio = false;
 584		}
 585		if (likely(bio->bi_iter.bi_size)) {
 586			bvprv = bvec;
 587			new_bio = true;
 588		}
 589	}
 590
 591	return nsegs;
 592}
 593
 594/*
 595 * map a request to scatterlist, return number of sg entries setup. Caller
 596 * must make sure sg can hold rq->nr_phys_segments entries
 597 */
 598int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 599		struct scatterlist *sglist, struct scatterlist **last_sg)
 600{
 601	int nsegs = 0;
 602
 603	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 604		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
 605	else if (rq->bio)
 606		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 607
 608	if (*last_sg)
 609		sg_mark_end(*last_sg);
 610
 611	/*
  612	 * Something must have gone wrong if the computed number of
  613	 * segments is bigger than the number of the request's physical segments
 614	 */
 615	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
 616
 617	return nsegs;
 618}
 619EXPORT_SYMBOL(__blk_rq_map_sg);
 620
 621static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 622						  sector_t offset)
 623{
 624	struct request_queue *q = rq->q;
 625	struct queue_limits *lim = &q->limits;
 626	unsigned int max_sectors, boundary_sectors;
 627	bool is_atomic = rq->cmd_flags & REQ_ATOMIC;
 628
 629	if (blk_rq_is_passthrough(rq))
 630		return q->limits.max_hw_sectors;
 631
 632	boundary_sectors = blk_boundary_sectors(lim, is_atomic);
 633	max_sectors = blk_queue_get_max_sectors(rq);
 634
 635	if (!boundary_sectors ||
 636	    req_op(rq) == REQ_OP_DISCARD ||
 637	    req_op(rq) == REQ_OP_SECURE_ERASE)
 638		return max_sectors;
 639	return min(max_sectors,
 640		   blk_boundary_sectors_left(offset, boundary_sectors));
 641}
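/*
 * Editor's note, a worked example with made-up numbers: with a 128-sector
 * boundary (chunk_sectors or the atomic write boundary) and a request
 * starting at sector 100, 100 & (128 - 1) == 100, so only 28 sectors remain
 * before the boundary.  The function above then returns
 * min(max_sectors, 28), keeping a merged request from straddling the
 * boundary.
 */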
 642
 643static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 644		unsigned int nr_phys_segs)
 645{
 646	if (!blk_cgroup_mergeable(req, bio))
 647		goto no_merge;
 648
 649	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 650		goto no_merge;
 651
 652	/* discard request merge won't add new segment */
 653	if (req_op(req) == REQ_OP_DISCARD)
 654		return 1;
 655
 656	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
 657		goto no_merge;
 658
 659	/*
 660	 * This will form the start of a new hw segment.  Bump both
 661	 * counters.
 662	 */
 663	req->nr_phys_segments += nr_phys_segs;
 664	if (bio_integrity(bio))
 665		req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
 666									bio);
 667	return 1;
 668
 669no_merge:
 670	req_set_nomerge(req->q, req);
 671	return 0;
 672}
 673
 674int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 675{
 676	if (req_gap_back_merge(req, bio))
 677		return 0;
 678	if (blk_integrity_rq(req) &&
 679	    integrity_req_gap_back_merge(req, bio))
 680		return 0;
 681	if (!bio_crypt_ctx_back_mergeable(req, bio))
 682		return 0;
 683	if (blk_rq_sectors(req) + bio_sectors(bio) >
 684	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
 685		req_set_nomerge(req->q, req);
 686		return 0;
 687	}
 688
 689	return ll_new_hw_segment(req, bio, nr_segs);
 690}
 691
 692static int ll_front_merge_fn(struct request *req, struct bio *bio,
 693		unsigned int nr_segs)
 694{
 695	if (req_gap_front_merge(req, bio))
 696		return 0;
 697	if (blk_integrity_rq(req) &&
 698	    integrity_req_gap_front_merge(req, bio))
 699		return 0;
 700	if (!bio_crypt_ctx_front_mergeable(req, bio))
 701		return 0;
 702	if (blk_rq_sectors(req) + bio_sectors(bio) >
 703	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
 704		req_set_nomerge(req->q, req);
 705		return 0;
 706	}
 707
 708	return ll_new_hw_segment(req, bio, nr_segs);
 709}
 710
 711static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
 712		struct request *next)
 713{
 714	unsigned short segments = blk_rq_nr_discard_segments(req);
 715
 716	if (segments >= queue_max_discard_segments(q))
 717		goto no_merge;
 718	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
 719	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 720		goto no_merge;
 721
 722	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
 723	return true;
 724no_merge:
 725	req_set_nomerge(q, req);
 726	return false;
 727}
 728
 729static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 730				struct request *next)
 731{
 732	int total_phys_segments;
 733
 734	if (req_gap_back_merge(req, next->bio))
 735		return 0;
 736
 737	/*
 738	 * Will it become too large?
 739	 */
 740	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
 741	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 742		return 0;
 743
 744	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
 745	if (total_phys_segments > blk_rq_get_max_segments(req))
 746		return 0;
 747
 748	if (!blk_cgroup_mergeable(req, next->bio))
 749		return 0;
 750
 751	if (blk_integrity_merge_rq(q, req, next) == false)
 752		return 0;
 753
 754	if (!bio_crypt_ctx_merge_rq(req, next))
 755		return 0;
 756
 757	/* Merge is OK... */
 758	req->nr_phys_segments = total_phys_segments;
 759	req->nr_integrity_segments += next->nr_integrity_segments;
 760	return 1;
 761}
 762
 763/**
 764 * blk_rq_set_mixed_merge - mark a request as mixed merge
 765 * @rq: request to mark as mixed merge
 766 *
 767 * Description:
 768 *     @rq is about to be mixed merged.  Make sure the attributes
 769 *     which can be mixed are set in each bio and mark @rq as mixed
 770 *     merged.
 771 */
 772static void blk_rq_set_mixed_merge(struct request *rq)
 773{
 774	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 775	struct bio *bio;
 776
 777	if (rq->rq_flags & RQF_MIXED_MERGE)
 778		return;
 779
 780	/*
 781	 * @rq will no longer represent mixable attributes for all the
 782	 * contained bios.  It will just track those of the first one.
 783	 * Distribute the attributes to each bio.
 784	 */
 785	for (bio = rq->bio; bio; bio = bio->bi_next) {
 786		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
 787			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
 788		bio->bi_opf |= ff;
 789	}
 790	rq->rq_flags |= RQF_MIXED_MERGE;
 791}
 792
 793static inline blk_opf_t bio_failfast(const struct bio *bio)
 794{
 795	if (bio->bi_opf & REQ_RAHEAD)
 796		return REQ_FAILFAST_MASK;
 797
 798	return bio->bi_opf & REQ_FAILFAST_MASK;
 799}
 800
 801/*
 802 * After the request is marked as MIXED_MERGE, any new readahead (RA) bio
 803 * has to be marked failfast as well, and the request's failfast flags have
 804 * to be updated in case of a front merge.
 805 */
 806static inline void blk_update_mixed_merge(struct request *req,
 807		struct bio *bio, bool front_merge)
 808{
 809	if (req->rq_flags & RQF_MIXED_MERGE) {
 810		if (bio->bi_opf & REQ_RAHEAD)
 811			bio->bi_opf |= REQ_FAILFAST_MASK;
 812
 813		if (front_merge) {
 814			req->cmd_flags &= ~REQ_FAILFAST_MASK;
 815			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
 816		}
 817	}
 818}
 819
 820static void blk_account_io_merge_request(struct request *req)
 821{
 822	if (req->rq_flags & RQF_IO_STAT) {
 823		part_stat_lock();
 824		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 825		part_stat_local_dec(req->part,
 826				    in_flight[op_is_write(req_op(req))]);
 827		part_stat_unlock();
 828	}
 829}
 830
 831static enum elv_merge blk_try_req_merge(struct request *req,
 832					struct request *next)
 833{
 834	if (blk_discard_mergable(req))
 835		return ELEVATOR_DISCARD_MERGE;
 836	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
 837		return ELEVATOR_BACK_MERGE;
 838
 839	return ELEVATOR_NO_MERGE;
 840}
 841
 842static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
 843					      struct bio *bio)
 844{
 845	return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
 846}
 847
 848static bool blk_atomic_write_mergeable_rqs(struct request *rq,
 849					   struct request *next)
 850{
 851	return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
 852}
 853
 854/*
 855 * For non-mq, this has to be called with the request spinlock acquired.
 856 * For mq with scheduling, the appropriate queue wide lock should be held.
 857 */
 858static struct request *attempt_merge(struct request_queue *q,
 859				     struct request *req, struct request *next)
 860{
 861	if (!rq_mergeable(req) || !rq_mergeable(next))
 862		return NULL;
 863
 864	if (req_op(req) != req_op(next))
 865		return NULL;
 866
 867	if (req->bio->bi_write_hint != next->bio->bi_write_hint)
 868		return NULL;
 869	if (req->bio->bi_ioprio != next->bio->bi_ioprio)
 870		return NULL;
 871	if (!blk_atomic_write_mergeable_rqs(req, next))
 872		return NULL;
 873
 874	/*
 875	 * If we are allowed to merge, then append the bio list
 876	 * from next to req and release next.  merge_requests_fn
 877	 * will have updated the segment counts; update the sector
 878	 * counts here.  Handle DISCARDs separately, as they
 879	 * have separate settings.
 880	 */
 881
 882	switch (blk_try_req_merge(req, next)) {
 883	case ELEVATOR_DISCARD_MERGE:
 884		if (!req_attempt_discard_merge(q, req, next))
 885			return NULL;
 886		break;
 887	case ELEVATOR_BACK_MERGE:
 888		if (!ll_merge_requests_fn(q, req, next))
 889			return NULL;
 890		break;
 891	default:
 892		return NULL;
 893	}
 894
 895	/*
 896	 * If failfast settings disagree or either of the two is already
 897	 * a mixed merge, mark both as mixed before proceeding.  This
 898	 * makes sure that all involved bios have mixable attributes
 899	 * set properly.
 900	 */
 901	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
 902	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
 903	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
 904		blk_rq_set_mixed_merge(req);
 905		blk_rq_set_mixed_merge(next);
 906	}
 907
 908	/*
 909	 * At this point we have either done a back merge or front merge. We
 910	 * need the smaller start_time_ns of the merged requests to be the
 911	 * current request for accounting purposes.
 912	 */
 913	if (next->start_time_ns < req->start_time_ns)
 914		req->start_time_ns = next->start_time_ns;
 915
 916	req->biotail->bi_next = next->bio;
 917	req->biotail = next->biotail;
 918
 919	req->__data_len += blk_rq_bytes(next);
 920
 921	if (!blk_discard_mergable(req))
 922		elv_merge_requests(q, req, next);
 923
 924	blk_crypto_rq_put_keyslot(next);
 925
 926	/*
 927	 * 'next' is going away, so update stats accordingly
 928	 */
 929	blk_account_io_merge_request(next);
 930
 931	trace_block_rq_merge(next);
 932
 933	/*
 934	 * Ownership of the bios has passed from next to req; return 'next'
 935	 * for the caller to free.
 936	 */
 937	next->bio = NULL;
 938	return next;
 939}
 940
 941static struct request *attempt_back_merge(struct request_queue *q,
 942		struct request *rq)
 943{
 944	struct request *next = elv_latter_request(q, rq);
 945
 946	if (next)
 947		return attempt_merge(q, rq, next);
 948
 949	return NULL;
 950}
 951
 952static struct request *attempt_front_merge(struct request_queue *q,
 953		struct request *rq)
 954{
 955	struct request *prev = elv_former_request(q, rq);
 956
 957	if (prev)
 958		return attempt_merge(q, prev, rq);
 959
 960	return NULL;
 961}
 962
 963/*
 964 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 965 * otherwise. The caller is responsible for freeing 'next' if the merge
 966 * happened.
 967 */
 968bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 969			   struct request *next)
 970{
 971	return attempt_merge(q, rq, next);
 972}
 973
 974bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 975{
 976	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 977		return false;
 978
 979	if (req_op(rq) != bio_op(bio))
 980		return false;
 981
 982	if (!blk_cgroup_mergeable(rq, bio))
 983		return false;
 984	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 985		return false;
 986	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 987		return false;
 988	if (rq->bio->bi_write_hint != bio->bi_write_hint)
 989		return false;
 990	if (rq->bio->bi_ioprio != bio->bi_ioprio)
 991		return false;
 992	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
 993		return false;
 994
 995	return true;
 996}
 997
 998enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 999{
1000	if (blk_discard_mergable(rq))
1001		return ELEVATOR_DISCARD_MERGE;
1002	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
1003		return ELEVATOR_BACK_MERGE;
1004	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
1005		return ELEVATOR_FRONT_MERGE;
1006	return ELEVATOR_NO_MERGE;
1007}
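/*
 * Editor's note, a worked example with made-up numbers: for a request
 * covering sectors [2048, 2056), blk_rq_pos(rq) == 2048 and
 * blk_rq_sectors(rq) == 8, so blk_try_merge() classifies:
 *
 *	a bio with bi_sector == 2056		-> ELEVATOR_BACK_MERGE
 *	an 8-sector bio with bi_sector == 2040	-> ELEVATOR_FRONT_MERGE
 *						   (it ends exactly at 2048)
 *	any other position			-> ELEVATOR_NO_MERGE
 */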
1008
1009static void blk_account_io_merge_bio(struct request *req)
1010{
1011	if (req->rq_flags & RQF_IO_STAT) {
1012		part_stat_lock();
1013		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
1014		part_stat_unlock();
1015	}
1016}
1017
1018enum bio_merge_status bio_attempt_back_merge(struct request *req,
1019		struct bio *bio, unsigned int nr_segs)
1020{
1021	const blk_opf_t ff = bio_failfast(bio);
1022
1023	if (!ll_back_merge_fn(req, bio, nr_segs))
1024		return BIO_MERGE_FAILED;
1025
1026	trace_block_bio_backmerge(bio);
1027	rq_qos_merge(req->q, req, bio);
1028
1029	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1030		blk_rq_set_mixed_merge(req);
1031
1032	blk_update_mixed_merge(req, bio, false);
1033
1034	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
1035		blk_zone_write_plug_bio_merged(bio);
1036
1037	req->biotail->bi_next = bio;
1038	req->biotail = bio;
1039	req->__data_len += bio->bi_iter.bi_size;
1040
1041	bio_crypt_free_ctx(bio);
1042
1043	blk_account_io_merge_bio(req);
1044	return BIO_MERGE_OK;
1045}
1046
1047static enum bio_merge_status bio_attempt_front_merge(struct request *req,
1048		struct bio *bio, unsigned int nr_segs)
1049{
1050	const blk_opf_t ff = bio_failfast(bio);
1051
1052	/*
1053	 * A front merge for writes to sequential zones of a zoned block device
1054	 * can happen only if the user submitted writes out of order.  Do not
1055	 * merge such a write; let it fail.
1056	 */
1057	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
1058		return BIO_MERGE_FAILED;
1059
1060	if (!ll_front_merge_fn(req, bio, nr_segs))
1061		return BIO_MERGE_FAILED;
1062
1063	trace_block_bio_frontmerge(bio);
1064	rq_qos_merge(req->q, req, bio);
1065
1066	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1067		blk_rq_set_mixed_merge(req);
1068
1069	blk_update_mixed_merge(req, bio, true);
1070
1071	bio->bi_next = req->bio;
1072	req->bio = bio;
1073
1074	req->__sector = bio->bi_iter.bi_sector;
1075	req->__data_len += bio->bi_iter.bi_size;
1076
1077	bio_crypt_do_front_merge(req, bio);
1078
1079	blk_account_io_merge_bio(req);
1080	return BIO_MERGE_OK;
1081}
1082
1083static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1084		struct request *req, struct bio *bio)
1085{
1086	unsigned short segments = blk_rq_nr_discard_segments(req);
1087
1088	if (segments >= queue_max_discard_segments(q))
1089		goto no_merge;
1090	if (blk_rq_sectors(req) + bio_sectors(bio) >
1091	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1092		goto no_merge;
1093
1094	rq_qos_merge(q, req, bio);
1095
1096	req->biotail->bi_next = bio;
1097	req->biotail = bio;
1098	req->__data_len += bio->bi_iter.bi_size;
1099	req->nr_phys_segments = segments + 1;
1100
1101	blk_account_io_merge_bio(req);
1102	return BIO_MERGE_OK;
1103no_merge:
1104	req_set_nomerge(q, req);
1105	return BIO_MERGE_FAILED;
1106}
1107
1108static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1109						   struct request *rq,
1110						   struct bio *bio,
1111						   unsigned int nr_segs,
1112						   bool sched_allow_merge)
1113{
1114	if (!blk_rq_merge_ok(rq, bio))
1115		return BIO_MERGE_NONE;
1116
1117	switch (blk_try_merge(rq, bio)) {
1118	case ELEVATOR_BACK_MERGE:
1119		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1120			return bio_attempt_back_merge(rq, bio, nr_segs);
1121		break;
1122	case ELEVATOR_FRONT_MERGE:
1123		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1124			return bio_attempt_front_merge(rq, bio, nr_segs);
1125		break;
1126	case ELEVATOR_DISCARD_MERGE:
1127		return bio_attempt_discard_merge(q, rq, bio);
1128	default:
1129		return BIO_MERGE_NONE;
1130	}
1131
1132	return BIO_MERGE_FAILED;
1133}
1134
1135/**
1136 * blk_attempt_plug_merge - try to merge with %current's plugged list
1137 * @q: request_queue new bio is being queued at
1138 * @bio: new bio being queued
1139 * @nr_segs: number of segments in @bio
1140 *
1141 *
1142 * Determine whether @bio being queued on @q can be merged with the previous
1143 * request on %current's plugged list.  Returns %true if merge was successful,
1144 * otherwise %false.
1145 *
1146 * Plugging coalesces IOs from the same issuer for the same purpose without
1147 * going through @q->queue_lock.  As such it's more of an issuing mechanism
1148 * than scheduling, and the request, while it may have elvpriv data, is
1149 * not added to the elevator at this point.  In addition, we don't have
1150 * reliable access to the elevator outside queue lock.  Only check basic
1151 * merging parameters without querying the elevator.
1152 *
1153 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1154 */
1155bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1156		unsigned int nr_segs)
1157{
1158	struct blk_plug *plug = current->plug;
1159	struct request *rq;
1160
1161	if (!plug || rq_list_empty(&plug->mq_list))
1162		return false;
1163
1164	rq_list_for_each(&plug->mq_list, rq) {
1165		if (rq->q == q) {
1166			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1167			    BIO_MERGE_OK)
1168				return true;
1169			break;
1170		}
1171
1172		/*
1173		 * Only keep iterating plug list for merges if we have multiple
1174		 * queues
1175		 */
1176		if (!plug->multiple_queues)
1177			break;
1178	}
1179	return false;
1180}
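/*
 * Editor's sketch, not part of blk-merge.c: the plug-merge path above only
 * runs for bios submitted inside a plugged section.  A hedged illustration
 * of the usual submitter pattern; the helper name and the bio array are
 * hypothetical.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		/* each submission may merge via blk_attempt_plug_merge() */
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* issue whatever remains on plug->mq_list */
}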
1181
1182/*
1183 * Iterate list of requests and see if we can merge this bio with any
1184 * of them.
1185 */
1186bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1187			struct bio *bio, unsigned int nr_segs)
1188{
1189	struct request *rq;
1190	int checked = 8;
1191
1192	list_for_each_entry_reverse(rq, list, queuelist) {
1193		if (!checked--)
1194			break;
1195
1196		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1197		case BIO_MERGE_NONE:
1198			continue;
1199		case BIO_MERGE_OK:
1200			return true;
1201		case BIO_MERGE_FAILED:
1202			return false;
1203		}
1204
1205	}
1206
1207	return false;
1208}
1209EXPORT_SYMBOL_GPL(blk_bio_list_merge);
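/*
 * Editor's sketch, not part of blk-merge.c: blk_bio_list_merge() is intended
 * for I/O schedulers that keep plain lists of pending requests.  A hedged
 * illustration of how an elevator's ->bio_merge() hook could drive it;
 * struct example_sched_data and its members are hypothetical stand-ins for
 * the scheduler's real per-queue state.
 */
struct example_sched_data {
	spinlock_t lock;
	struct list_head rq_list;	/* pending requests, newest at the tail */
};

static bool example_bio_merge(struct request_queue *q, struct bio *bio,
			      unsigned int nr_segs)
{
	struct example_sched_data *sd = q->elevator->elevator_data;
	bool merged;

	spin_lock(&sd->lock);
	merged = blk_bio_list_merge(q, &sd->rq_list, bio, nr_segs);
	spin_unlock(&sd->lock);

	return merged;
}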
1210
1211bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1212		unsigned int nr_segs, struct request **merged_request)
1213{
1214	struct request *rq;
1215
1216	switch (elv_merge(q, &rq, bio)) {
1217	case ELEVATOR_BACK_MERGE:
1218		if (!blk_mq_sched_allow_merge(q, rq, bio))
1219			return false;
1220		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1221			return false;
1222		*merged_request = attempt_back_merge(q, rq);
1223		if (!*merged_request)
1224			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1225		return true;
1226	case ELEVATOR_FRONT_MERGE:
1227		if (!blk_mq_sched_allow_merge(q, rq, bio))
1228			return false;
1229		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1230			return false;
1231		*merged_request = attempt_front_merge(q, rq);
1232		if (!*merged_request)
1233			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1234		return true;
1235	case ELEVATOR_DISCARD_MERGE:
1236		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1237	default:
1238		return false;
1239	}
1240}
1241EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
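/*
 * Editor's sketch, not part of blk-merge.c: a back or front merge done by
 * blk_mq_sched_try_merge() may leave an adjacent request redundant; that
 * request is handed back through @merged_request and must be freed by the
 * caller, outside its own lock.  A hedged illustration of the usual calling
 * pattern from an elevator's ->bio_merge() hook, reusing the hypothetical
 * example_sched_data above.
 */
static bool example_bio_merge_via_elevator(struct request_queue *q,
					   struct bio *bio,
					   unsigned int nr_segs)
{
	struct example_sched_data *sd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&sd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&sd->lock);

	/* a request made redundant by a request-to-request merge */
	if (free)
		blk_mq_free_request(free);

	return ret;
}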