v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions related to segment and merge handling
   4 */
   5#include <linux/kernel.h>
   6#include <linux/module.h>
   7#include <linux/bio.h>
   8#include <linux/blkdev.h>
   9#include <linux/blk-integrity.h>
  10#include <linux/scatterlist.h>
  11#include <linux/part_stat.h>
  12#include <linux/blk-cgroup.h>
  13
  14#include <trace/events/block.h>
  15
  16#include "blk.h"
  17#include "blk-mq-sched.h"
  18#include "blk-rq-qos.h"
  19#include "blk-throttle.h"
  20
  21static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
  22{
  23	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
  24}
  25
  26static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
  27{
  28	struct bvec_iter iter = bio->bi_iter;
  29	int idx;
  30
  31	bio_get_first_bvec(bio, bv);
  32	if (bv->bv_len == bio->bi_iter.bi_size)
  33		return;		/* this bio only has a single bvec */
  34
  35	bio_advance_iter(bio, &iter, iter.bi_size);
  36
  37	if (!iter.bi_bvec_done)
  38		idx = iter.bi_idx - 1;
  39	else	/* in the middle of bvec */
  40		idx = iter.bi_idx;
  41
  42	*bv = bio->bi_io_vec[idx];
  43
  44	/*
  45	 * iter.bi_bvec_done records actual length of the last bvec
  46	 * if this bio ends in the middle of one io vector
  47	 */
  48	if (iter.bi_bvec_done)
  49		bv->bv_len = iter.bi_bvec_done;
  50}
  51
  52static inline bool bio_will_gap(struct request_queue *q,
  53		struct request *prev_rq, struct bio *prev, struct bio *next)
  54{
  55	struct bio_vec pb, nb;
  56
  57	if (!bio_has_data(prev) || !queue_virt_boundary(q))
  58		return false;
  59
  60	/*
  61	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
  62	 * is quite difficult to respect the sg gap limit.  We work hard to
  63	 * merge a huge number of small single bios in case of mkfs.
  64	 */
  65	if (prev_rq)
  66		bio_get_first_bvec(prev_rq->bio, &pb);
  67	else
  68		bio_get_first_bvec(prev, &pb);
  69	if (pb.bv_offset & queue_virt_boundary(q))
  70		return true;
  71
  72	/*
  73	 * We don't need to worry about the situation that the merged segment
  74	 * ends in unaligned virt boundary:
  75	 *
  76	 * - if 'pb' ends aligned, the merged segment ends aligned
  77	 * - if 'pb' ends unaligned, the next bio must include
  78	 *   one single bvec of 'nb', otherwise the 'nb' can't
  79	 *   merge with 'pb'
  80	 */
  81	bio_get_last_bvec(prev, &pb);
  82	bio_get_first_bvec(next, &nb);
  83	if (biovec_phys_mergeable(q, &pb, &nb))
  84		return false;
  85	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
  86}
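/*
 * Illustrative example (hypothetical limits): with virt_boundary_mask = 4095
 * (a 4 KiB virt boundary, as used by NVMe), a gap is reported unless the
 * previous bio's last bvec ends on a 4 KiB boundary and the next bio's first
 * bvec starts at a 4 KiB aligned offset, or the two bvecs are physically
 * contiguous and mergeable into one segment.  E.g. a last bvec ending at
 * offset 2048 within its page, or a first bvec with bv_offset = 512, would
 * prevent the merge.
 */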
  87
  88static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
  89{
  90	return bio_will_gap(req->q, req, req->biotail, bio);
  91}
  92
  93static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  94{
  95	return bio_will_gap(req->q, NULL, bio, req->bio);
  96}
  97
  98/*
  99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 100 * is defined as 'unsigned int'; meanwhile it has to be aligned to the
 101 * logical block size, which is the minimum unit accepted by hardware.
 102 */
 103static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
 104{
 105	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
 106}
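/*
 * Worked example: with a 512-byte logical block size this evaluates to
 * round_down(UINT_MAX, 512) >> 9 = 8388607 sectors; with a 4096-byte logical
 * block size it is 8388600 sectors.  Either way a bio stays within the
 * UINT_MAX limit of bvec_iter.bi_size while remaining logical-block aligned.
 */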
 107
 108static struct bio *bio_split_discard(struct bio *bio,
 109				     const struct queue_limits *lim,
 110				     unsigned *nsegs, struct bio_set *bs)
 111{
 112	unsigned int max_discard_sectors, granularity;
 113	sector_t tmp;
 114	unsigned split_sectors;
 115
 116	*nsegs = 1;
 117
 118	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 119	granularity = max(lim->discard_granularity >> 9, 1U);
 120
 121	max_discard_sectors =
 122		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
 123	max_discard_sectors -= max_discard_sectors % granularity;
 124
 125	if (unlikely(!max_discard_sectors)) {
 126		/* XXX: warn */
 127		return NULL;
 128	}
 129
 130	if (bio_sectors(bio) <= max_discard_sectors)
 131		return NULL;
 132
 133	split_sectors = max_discard_sectors;
 134
 135	/*
 136	 * If the next starting sector would be misaligned, stop the discard at
 137	 * the previous aligned sector.
 138	 */
 139	tmp = bio->bi_iter.bi_sector + split_sectors -
 140		((lim->discard_alignment >> 9) % granularity);
 141	tmp = sector_div(tmp, granularity);
 142
 143	if (split_sectors > tmp)
 144		split_sectors -= tmp;
 145
 146	return bio_split(bio, split_sectors, GFP_NOIO, bs);
 147}
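/*
 * Worked example (hypothetical limits): with discard_granularity = 4 KiB
 * (granularity = 8 sectors), discard_alignment = 0, bi_sector = 3 and
 * max_discard_sectors = 1024, the split would otherwise end at sector 1027.
 * tmp = 1027 % 8 = 3, so split_sectors becomes 1021 and the split ends at
 * sector 1024, a granularity-aligned boundary.
 */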
 148
 149static struct bio *bio_split_write_zeroes(struct bio *bio,
 150					  const struct queue_limits *lim,
 151					  unsigned *nsegs, struct bio_set *bs)
 152{
 153	*nsegs = 0;
 154	if (!lim->max_write_zeroes_sectors)
 155		return NULL;
 156	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
 157		return NULL;
 158	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
 159}
 160
 161/*
 162 * Return the maximum number of sectors from the start of a bio that may be
 163 * submitted as a single request to a block device. If enough sectors remain,
 164 * align the end to the physical block size. Otherwise align the end to the
 165 * logical block size. This approach minimizes the number of non-aligned
 166 * requests that are submitted to a block device if the start of a bio is not
 167 * aligned to a physical block boundary.
 168 */
 169static inline unsigned get_max_io_size(struct bio *bio,
 170				       const struct queue_limits *lim)
 171{
 172	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 173	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
 174	unsigned max_sectors = lim->max_sectors, start, end;
 175
 176	if (lim->chunk_sectors) {
 177		max_sectors = min(max_sectors,
 178			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
 179					       lim->chunk_sectors));
 180	}
 181
 182	start = bio->bi_iter.bi_sector & (pbs - 1);
 183	end = (start + max_sectors) & ~(pbs - 1);
 184	if (end > start)
 185		return end - start;
 186	return max_sectors & ~(lbs - 1);
 187}
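/*
 * Worked example (hypothetical limits): with a 4096-byte physical block size
 * (pbs = 8), a 512-byte logical block size (lbs = 1), max_sectors = 1280 and
 * a bio starting at sector 3: start = 3, end = (3 + 1280) & ~7 = 1280, so
 * 1277 sectors are allowed and the split ends on a physical block boundary at
 * sector 1280.  If end did not exceed start, the fallback would instead round
 * max_sectors down to the logical block size.
 */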
 188
 189/**
 190 * get_max_segment_size() - maximum number of bytes to add as a single segment
 191 * @lim: Request queue limits.
 192 * @start_page: See below.
 193 * @offset: Offset from @start_page where to add a segment.
 194 *
 195 * Returns the maximum number of bytes that can be added as a single segment.
 196 */
 197static inline unsigned get_max_segment_size(const struct queue_limits *lim,
 198		struct page *start_page, unsigned long offset)
 199{
 200	unsigned long mask = lim->seg_boundary_mask;
 201
 202	offset = mask & (page_to_phys(start_page) + offset);
 203
 204	/*
 205	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
 206	 * after having calculated the minimum.
 207	 */
 208	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
 209}
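/*
 * Worked example (hypothetical limits): with seg_boundary_mask = 0xffff
 * (a 64 KiB segment boundary), max_segment_size = 65536 and data starting at
 * offset 0x5600 within the boundary window, min(0xffff - 0x5600, 65535) + 1 =
 * 43520 bytes can be added before the segment would cross the boundary.
 * Adding 1 only after taking the minimum avoids overflowing to 0 in the
 * unconstrained case where mask == ULONG_MAX and offset == 0.
 */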
 210
 211/**
 212 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 213 * @lim:      [in] queue limits to split based on
 214 * @bv:       [in] bvec to examine
 215 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 216 *            by the number of segments from @bv that may be appended to that
 217 *            bio without exceeding @max_segs
 218 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 219 *            by the number of bytes from @bv that may be appended to that
 220 *            bio without exceeding @max_bytes
 221 * @max_segs: [in] upper bound for *@nsegs
 222 * @max_bytes: [in] upper bound for *@bytes
 223 *
 224 * When splitting a bio, it can happen that a bvec is encountered that is too
 225 * big to fit in a single segment and hence that it has to be split in the
 226 * middle. This function verifies whether or not that should happen. The value
 227 * %true is returned if and only if appending the entire @bv to a bio with
 228 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 229 * the block driver.
 230 */
 231static bool bvec_split_segs(const struct queue_limits *lim,
 232		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
 233		unsigned max_segs, unsigned max_bytes)
 234{
 235	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
 236	unsigned len = min(bv->bv_len, max_len);
 237	unsigned total_len = 0;
 238	unsigned seg_size = 0;
 239
 240	while (len && *nsegs < max_segs) {
 241		seg_size = get_max_segment_size(lim, bv->bv_page,
 242						bv->bv_offset + total_len);
 243		seg_size = min(seg_size, len);
 244
 245		(*nsegs)++;
 246		total_len += seg_size;
 247		len -= seg_size;
 248
 249		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
 250			break;
 251	}
 252
 253	*bytes += total_len;
 254
 255	/* tell the caller to split the bvec if it is too big to fit */
 256	return len > 0 || bv->bv_len > max_len;
 257}
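/*
 * Worked example (hypothetical limits): a 10000-byte bvec at offset 0 with
 * max_segment_size = 4096, no virt boundary and generous max_segs/max_bytes
 * is accounted as three segments of 4096, 4096 and 1808 bytes: *nsegs grows
 * by 3, *bytes by 10000, and the function returns false because the whole
 * bvec fits.  If max_segs or max_bytes ran out first, it would return true
 * and the caller would have to split the bio.
 */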
 258
 259/**
 260 * bio_split_rw - split a bio in two bios
 261 * @bio:  [in] bio to be split
 262 * @lim:  [in] queue limits to split based on
 263 * @segs: [out] number of segments in the bio with the first half of the sectors
 264 * @bs:	  [in] bio set to allocate the clone from
 265 * @max_bytes: [in] maximum number of bytes per bio
 266 *
 267 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 268 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 269 * following is guaranteed for the cloned bio:
 270 * - That it has at most @max_bytes worth of data
 271 * - That it has at most queue_max_segments(@q) segments.
 272 *
 273 * Except for discard requests the cloned bio will point at the bi_io_vec of
 274 * the original bio. It is the responsibility of the caller to ensure that the
 275 * original bio is not freed before the cloned bio. The caller is also
 276 * responsible for ensuring that @bs is only destroyed after processing of the
 277 * split bio has finished.
 278 */
 279static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 280		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
 281{
 282	struct bio_vec bv, bvprv, *bvprvp = NULL;
 283	struct bvec_iter iter;
 284	unsigned nsegs = 0, bytes = 0;
 285
 286	bio_for_each_bvec(bv, bio, iter) {
 287		/*
 288		 * If the queue doesn't support SG gaps and adding this
 289		 * offset would create a gap, disallow it.
 290		 */
 291		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
 292			goto split;
 293
 294		if (nsegs < lim->max_segments &&
 295		    bytes + bv.bv_len <= max_bytes &&
 296		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 297			nsegs++;
 298			bytes += bv.bv_len;
 299		} else {
 300			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
 301					lim->max_segments, max_bytes))
 302				goto split;
 303		}
 304
 305		bvprv = bv;
 306		bvprvp = &bvprv;
 307	}
 308
 309	*segs = nsegs;
 310	return NULL;
 311split:
 312	/*
 313	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
 314	 * with EAGAIN if splitting is required and return an error pointer.
 315	 */
 316	if (bio->bi_opf & REQ_NOWAIT) {
 317		bio->bi_status = BLK_STS_AGAIN;
 318		bio_endio(bio);
 319		return ERR_PTR(-EAGAIN);
 320	}
 321
 322	*segs = nsegs;
 323
 324	/*
 325	 * Individual bvecs might not be logical block aligned. Round down the
 326	 * split size so that each bio is properly block size aligned, even if
 327	 * we do not use the full hardware limits.
 328	 */
 329	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
 330
 331	/*
 332	 * Bio splitting may cause subtle trouble such as a hang when doing sync
 333	 * iopoll in the direct IO routine. Given that the performance gain of
 334	 * iopoll for big IO can be trivial, disable iopoll when a split is needed.
 335	 */
 336	bio_clear_polled(bio);
 337	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
 338}
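/*
 * Illustrative example (hypothetical values): if the accepted bvecs add up to
 * 12800 bytes but the logical block size is 4096, the split point is rounded
 * down to 12288 bytes (24 sectors) so that both halves of the bio stay
 * logical-block aligned, even though part of the hardware limit goes unused.
 */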
 339
 340/**
 341 * __bio_split_to_limits - split a bio to fit the queue limits
 342 * @bio:     bio to be split
 343 * @lim:     queue limits to split based on
 344 * @nr_segs: returns the number of segments in the returned bio
 345 *
 346 * Check if @bio needs splitting based on the queue limits, and if so split off
 347 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 348 * shortened to the remainder and re-submitted.
 349 *
 350 * The split bio is allocated from @q->bio_split, which is provided by the
 351 * block layer.
 352 */
 353struct bio *__bio_split_to_limits(struct bio *bio,
 354				  const struct queue_limits *lim,
 355				  unsigned int *nr_segs)
 356{
 357	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
 358	struct bio *split;
 359
 360	switch (bio_op(bio)) {
 361	case REQ_OP_DISCARD:
 362	case REQ_OP_SECURE_ERASE:
 363		split = bio_split_discard(bio, lim, nr_segs, bs);
 364		break;
 365	case REQ_OP_WRITE_ZEROES:
 366		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
 367		break;
 368	default:
 369		split = bio_split_rw(bio, lim, nr_segs, bs,
 370				get_max_io_size(bio, lim) << SECTOR_SHIFT);
 371		if (IS_ERR(split))
 372			return NULL;
 373		break;
 374	}
 375
 376	if (split) {
 377		/* there is no chance to merge the split bio */
 378		split->bi_opf |= REQ_NOMERGE;
 379
 380		blkcg_bio_issue_init(split);
 381		bio_chain(split, bio);
 382		trace_block_split(split, bio->bi_iter.bi_sector);
 383		submit_bio_noacct(bio);
 384		return split;
 385	}
 386	return bio;
 387}
 388
 389/**
 390 * bio_split_to_limits - split a bio to fit the queue limits
 391 * @bio:     bio to be split
 392 *
 393 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 394 * if so split off a bio fitting the limits from the beginning of @bio and
 395 * return it.  @bio is shortened to the remainder and re-submitted.
 396 *
 397 * The split bio is allocated from @q->bio_split, which is provided by the
 398 * block layer.
 399 */
 400struct bio *bio_split_to_limits(struct bio *bio)
 401{
 402	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
 403	unsigned int nr_segs;
 404
 405	if (bio_may_exceed_limits(bio, lim))
 406		return __bio_split_to_limits(bio, lim, &nr_segs);
 407	return bio;
 408}
 409EXPORT_SYMBOL(bio_split_to_limits);
 410
 411unsigned int blk_recalc_rq_segments(struct request *rq)
 412{
 413	unsigned int nr_phys_segs = 0;
 414	unsigned int bytes = 0;
 415	struct req_iterator iter;
 416	struct bio_vec bv;
 417
 418	if (!rq->bio)
 419		return 0;
 420
 421	switch (bio_op(rq->bio)) {
 422	case REQ_OP_DISCARD:
 423	case REQ_OP_SECURE_ERASE:
 424		if (queue_max_discard_segments(rq->q) > 1) {
 425			struct bio *bio = rq->bio;
 426
 427			for_each_bio(bio)
 428				nr_phys_segs++;
 429			return nr_phys_segs;
 430		}
 431		return 1;
 432	case REQ_OP_WRITE_ZEROES:
 433		return 0;
 434	default:
 435		break;
 436	}
 437
 438	rq_for_each_bvec(bv, rq, iter)
 439		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
 440				UINT_MAX, UINT_MAX);
 441	return nr_phys_segs;
 442}
 443
 444static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 445		struct scatterlist *sglist)
 446{
 447	if (!*sg)
 448		return sglist;
 449
 450	/*
 451	 * If the driver previously mapped a shorter list, we could see a
 452	 * termination bit prematurely unless it fully inits the sg table
 453	 * on each mapping. We KNOW that there must be more entries here
 454	 * or the driver would be buggy, so force clear the termination bit
 455	 * to avoid doing a full sg_init_table() in drivers for each command.
 456	 */
 457	sg_unmark_end(*sg);
 458	return sg_next(*sg);
 459}
 460
 461static unsigned blk_bvec_map_sg(struct request_queue *q,
 462		struct bio_vec *bvec, struct scatterlist *sglist,
 463		struct scatterlist **sg)
 464{
 465	unsigned nbytes = bvec->bv_len;
 466	unsigned nsegs = 0, total = 0;
 467
 468	while (nbytes > 0) {
 469		unsigned offset = bvec->bv_offset + total;
 470		unsigned len = min(get_max_segment_size(&q->limits,
 471				   bvec->bv_page, offset), nbytes);
 472		struct page *page = bvec->bv_page;
 473
 474		/*
 475		 * Unfortunately a fair number of drivers barf on scatterlists
 476		 * that have an offset larger than PAGE_SIZE, despite other
 477		 * subsystems dealing with that invariant just fine.  For now
 478		 * stick to the legacy format where we never present those from
 479		 * the block layer, but the code below should be removed once
 480		 * these offenders (mostly MMC/SD drivers) are fixed.
 481		 */
 482		page += (offset >> PAGE_SHIFT);
 483		offset &= ~PAGE_MASK;
 484
 485		*sg = blk_next_sg(sg, sglist);
 486		sg_set_page(*sg, page, len, offset);
 487
 488		total += len;
 489		nbytes -= len;
 490		nsegs++;
 491	}
 492
 493	return nsegs;
 494}
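/*
 * Illustrative example (hypothetical limits): a 12288-byte multi-page bvec
 * with bv_offset = 512 and max_segment_size = 4096 is emitted as three
 * scatterlist entries of 4096 bytes each; for the later entries the page
 * pointer is advanced and the offset reduced modulo PAGE_SIZE so that no
 * entry is presented with an offset of PAGE_SIZE or more.
 */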
 495
 496static inline int __blk_bvec_map_sg(struct bio_vec bv,
 497		struct scatterlist *sglist, struct scatterlist **sg)
 498{
 499	*sg = blk_next_sg(sg, sglist);
 500	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
 501	return 1;
 502}
 503
 504/* only try to merge bvecs into one sg if they are from two bios */
 505static inline bool
 506__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
 507			   struct bio_vec *bvprv, struct scatterlist **sg)
 508{
 509
 510	int nbytes = bvec->bv_len;
 511
 512	if (!*sg)
 513		return false;
 514
 515	if ((*sg)->length + nbytes > queue_max_segment_size(q))
 516		return false;
 517
 518	if (!biovec_phys_mergeable(q, bvprv, bvec))
 519		return false;
 520
 521	(*sg)->length += nbytes;
 522
 523	return true;
 524}
 525
 526static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 527			     struct scatterlist *sglist,
 528			     struct scatterlist **sg)
 529{
 530	struct bio_vec bvec, bvprv = { NULL };
 531	struct bvec_iter iter;
 532	int nsegs = 0;
 533	bool new_bio = false;
 534
 535	for_each_bio(bio) {
 536		bio_for_each_bvec(bvec, bio, iter) {
 537			/*
 538			 * Only try to merge bvecs from two bios given we
 539			 * have done bio internal merge when adding pages
 540			 * to bio
 541			 */
 542			if (new_bio &&
 543			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
 544				goto next_bvec;
 545
 546			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
 547				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
 548			else
 549				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 550 next_bvec:
 551			new_bio = false;
 552		}
 553		if (likely(bio->bi_iter.bi_size)) {
 554			bvprv = bvec;
 555			new_bio = true;
 556		}
 557	}
 558
 559	return nsegs;
 560}
 561
 562/*
 563 * map a request to scatterlist, return number of sg entries setup. Caller
 564 * must make sure sg can hold rq->nr_phys_segments entries
 565 */
 566int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 567		struct scatterlist *sglist, struct scatterlist **last_sg)
 568{
 569	int nsegs = 0;
 570
 571	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 572		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
 573	else if (rq->bio)
 574		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 575
 576	if (*last_sg)
 577		sg_mark_end(*last_sg);
 578
 579	/*
 580	 * Something must have gone wrong if the computed number of
 581	 * segments is bigger than the number of req's physical segments
 582	 */
 583	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
 584
 585	return nsegs;
 586}
 587EXPORT_SYMBOL(__blk_rq_map_sg);
 588
 589static inline unsigned int blk_rq_get_max_segments(struct request *rq)
 590{
 591	if (req_op(rq) == REQ_OP_DISCARD)
 592		return queue_max_discard_segments(rq->q);
 593	return queue_max_segments(rq->q);
 594}
 595
 596static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 597						  sector_t offset)
 598{
 599	struct request_queue *q = rq->q;
 600	unsigned int max_sectors;
 601
 602	if (blk_rq_is_passthrough(rq))
 603		return q->limits.max_hw_sectors;
 604
 605	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
 606	if (!q->limits.chunk_sectors ||
 607	    req_op(rq) == REQ_OP_DISCARD ||
 608	    req_op(rq) == REQ_OP_SECURE_ERASE)
 609		return max_sectors;
 610	return min(max_sectors,
 611		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
 612}
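/*
 * Illustrative example (hypothetical limits): with chunk_sectors = 256
 * (a 128 KiB chunk or zone) and an I/O at offset 1000, only
 * 256 - (1000 & 255) = 24 sectors remain before the boundary at sector 1024,
 * so the merged request is capped at 24 sectors regardless of the larger
 * per-operation limit.
 */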
 613
 614static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 615		unsigned int nr_phys_segs)
 616{
 617	if (!blk_cgroup_mergeable(req, bio))
 618		goto no_merge;
 619
 620	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 621		goto no_merge;
 622
 623	/* discard request merge won't add new segment */
 624	if (req_op(req) == REQ_OP_DISCARD)
 625		return 1;
 626
 627	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
 628		goto no_merge;
 629
 630	/*
 631	 * This will form the start of a new hw segment.  Bump both
 632	 * counters.
 633	 */
 634	req->nr_phys_segments += nr_phys_segs;
 635	return 1;
 636
 637no_merge:
 638	req_set_nomerge(req->q, req);
 639	return 0;
 640}
 641
 642int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 643{
 644	if (req_gap_back_merge(req, bio))
 645		return 0;
 646	if (blk_integrity_rq(req) &&
 647	    integrity_req_gap_back_merge(req, bio))
 648		return 0;
 649	if (!bio_crypt_ctx_back_mergeable(req, bio))
 650		return 0;
 651	if (blk_rq_sectors(req) + bio_sectors(bio) >
 652	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
 653		req_set_nomerge(req->q, req);
 654		return 0;
 655	}
 656
 657	return ll_new_hw_segment(req, bio, nr_segs);
 658}
 659
 660static int ll_front_merge_fn(struct request *req, struct bio *bio,
 661		unsigned int nr_segs)
 662{
 663	if (req_gap_front_merge(req, bio))
 664		return 0;
 665	if (blk_integrity_rq(req) &&
 666	    integrity_req_gap_front_merge(req, bio))
 667		return 0;
 668	if (!bio_crypt_ctx_front_mergeable(req, bio))
 669		return 0;
 670	if (blk_rq_sectors(req) + bio_sectors(bio) >
 671	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
 672		req_set_nomerge(req->q, req);
 673		return 0;
 674	}
 675
 676	return ll_new_hw_segment(req, bio, nr_segs);
 677}
 678
 679static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
 680		struct request *next)
 681{
 682	unsigned short segments = blk_rq_nr_discard_segments(req);
 683
 684	if (segments >= queue_max_discard_segments(q))
 685		goto no_merge;
 686	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
 687	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 688		goto no_merge;
 689
 690	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
 691	return true;
 692no_merge:
 693	req_set_nomerge(q, req);
 694	return false;
 695}
 696
 697static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 698				struct request *next)
 699{
 700	int total_phys_segments;
 701
 702	if (req_gap_back_merge(req, next->bio))
 703		return 0;
 704
 705	/*
 706	 * Will it become too large?
 707	 */
 708	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
 709	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 710		return 0;
 711
 712	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
 713	if (total_phys_segments > blk_rq_get_max_segments(req))
 714		return 0;
 715
 716	if (!blk_cgroup_mergeable(req, next->bio))
 717		return 0;
 718
 719	if (blk_integrity_merge_rq(q, req, next) == false)
 720		return 0;
 721
 722	if (!bio_crypt_ctx_merge_rq(req, next))
 723		return 0;
 724
 725	/* Merge is OK... */
 726	req->nr_phys_segments = total_phys_segments;
 727	return 1;
 728}
 729
 730/**
 731 * blk_rq_set_mixed_merge - mark a request as mixed merge
 732 * @rq: request to mark as mixed merge
 733 *
 734 * Description:
 735 *     @rq is about to be mixed merged.  Make sure the attributes
 736 *     which can be mixed are set in each bio and mark @rq as mixed
 737 *     merged.
 738 */
 739void blk_rq_set_mixed_merge(struct request *rq)
 740{
 741	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 742	struct bio *bio;
 743
 744	if (rq->rq_flags & RQF_MIXED_MERGE)
 745		return;
 746
 747	/*
 748	 * @rq will no longer represent mixable attributes for all the
 749	 * contained bios.  It will just track those of the first one.
 750	 * Distribute the attributes to each bio.
 751	 */
 752	for (bio = rq->bio; bio; bio = bio->bi_next) {
 753		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
 754			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
 755		bio->bi_opf |= ff;
 756	}
 757	rq->rq_flags |= RQF_MIXED_MERGE;
 758}
 759
 760static void blk_account_io_merge_request(struct request *req)
 761{
 762	if (blk_do_io_stat(req)) {
 763		part_stat_lock();
 764		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 765		part_stat_unlock();
 766	}
 767}
 768
 769static enum elv_merge blk_try_req_merge(struct request *req,
 770					struct request *next)
 771{
 772	if (blk_discard_mergable(req))
 773		return ELEVATOR_DISCARD_MERGE;
 774	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
 775		return ELEVATOR_BACK_MERGE;
 776
 777	return ELEVATOR_NO_MERGE;
 778}
 779
 780/*
 781 * For non-mq, this has to be called with the request spinlock acquired.
 782 * For mq with scheduling, the appropriate queue wide lock should be held.
 783 */
 784static struct request *attempt_merge(struct request_queue *q,
 785				     struct request *req, struct request *next)
 786{
 787	if (!rq_mergeable(req) || !rq_mergeable(next))
 788		return NULL;
 789
 790	if (req_op(req) != req_op(next))
 791		return NULL;
 792
 793	if (rq_data_dir(req) != rq_data_dir(next))
 794		return NULL;
 795
 796	if (req->ioprio != next->ioprio)
 797		return NULL;
 798
 799	/*
 800	 * If we are allowed to merge, then append the bio list
 801	 * from 'next' to 'req' and release 'next'. merge_requests_fn
 802	 * will have updated the segment counts; update the sector
 803	 * counts here. Handle DISCARDs separately, as they
 804	 * have separate settings.
 805	 */
 806
 807	switch (blk_try_req_merge(req, next)) {
 808	case ELEVATOR_DISCARD_MERGE:
 809		if (!req_attempt_discard_merge(q, req, next))
 810			return NULL;
 811		break;
 812	case ELEVATOR_BACK_MERGE:
 813		if (!ll_merge_requests_fn(q, req, next))
 814			return NULL;
 815		break;
 816	default:
 817		return NULL;
 818	}
 819
 820	/*
 821	 * If failfast settings disagree or any of the two is already
 822	 * a mixed merge, mark both as mixed before proceeding.  This
 823	 * makes sure that all involved bios have mixable attributes
 824	 * set properly.
 825	 */
 826	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
 827	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
 828	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
 829		blk_rq_set_mixed_merge(req);
 830		blk_rq_set_mixed_merge(next);
 831	}
 832
 833	/*
 834	 * At this point we have either done a back merge or front merge. We
 835	 * need the smaller start_time_ns of the merged requests to be the
 836	 * current request for accounting purposes.
 837	 */
 838	if (next->start_time_ns < req->start_time_ns)
 839		req->start_time_ns = next->start_time_ns;
 840
 841	req->biotail->bi_next = next->bio;
 842	req->biotail = next->biotail;
 843
 844	req->__data_len += blk_rq_bytes(next);
 845
 846	if (!blk_discard_mergable(req))
 847		elv_merge_requests(q, req, next);
 848
 849	/*
 850	 * 'next' is going away, so update stats accordingly
 851	 */
 852	blk_account_io_merge_request(next);
 853
 854	trace_block_rq_merge(next);
 855
 856	/*
 857	 * ownership of bio passed from next to req, return 'next' for
 858	 * the caller to free
 859	 */
 860	next->bio = NULL;
 861	return next;
 862}
 863
 864static struct request *attempt_back_merge(struct request_queue *q,
 865		struct request *rq)
 866{
 867	struct request *next = elv_latter_request(q, rq);
 868
 869	if (next)
 870		return attempt_merge(q, rq, next);
 871
 872	return NULL;
 873}
 874
 875static struct request *attempt_front_merge(struct request_queue *q,
 876		struct request *rq)
 877{
 878	struct request *prev = elv_former_request(q, rq);
 879
 880	if (prev)
 881		return attempt_merge(q, prev, rq);
 882
 883	return NULL;
 884}
 885
 886/*
 887 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 888 * otherwise. The caller is responsible for freeing 'next' if the merge
 889 * happened.
 890 */
 891bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 892			   struct request *next)
 893{
 894	return attempt_merge(q, rq, next);
 895}
 896
 897bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 898{
 899	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 900		return false;
 901
 902	if (req_op(rq) != bio_op(bio))
 903		return false;
 904
 905	/* different data direction or already started, don't merge */
 906	if (bio_data_dir(bio) != rq_data_dir(rq))
 907		return false;
 908
 909	/* don't merge across cgroup boundaries */
 910	if (!blk_cgroup_mergeable(rq, bio))
 911		return false;
 912
 913	/* only merge integrity protected bio into ditto rq */
 914	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 915		return false;
 916
 917	/* Only merge if the crypt contexts are compatible */
 918	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 919		return false;
 920
 921	if (rq->ioprio != bio_prio(bio))
 922		return false;
 923
 924	return true;
 925}
 926
 927enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 928{
 929	if (blk_discard_mergable(rq))
 930		return ELEVATOR_DISCARD_MERGE;
 931	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 932		return ELEVATOR_BACK_MERGE;
 933	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 934		return ELEVATOR_FRONT_MERGE;
 935	return ELEVATOR_NO_MERGE;
 936}
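/*
 * Illustrative example: for a request covering sectors [1000, 1008), a bio
 * starting at sector 1008 is a back merge candidate (appended after the
 * request), while an 8-sector bio starting at sector 992 is a front merge
 * candidate (the request continues exactly where the bio ends).  Mergeable
 * discard requests are reported as ELEVATOR_DISCARD_MERGE before either
 * positional check.
 */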
 937
 938static void blk_account_io_merge_bio(struct request *req)
 939{
 940	if (!blk_do_io_stat(req))
 941		return;
 942
 943	part_stat_lock();
 944	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 945	part_stat_unlock();
 946}
 947
 948enum bio_merge_status {
 949	BIO_MERGE_OK,
 950	BIO_MERGE_NONE,
 951	BIO_MERGE_FAILED,
 952};
 953
 954static enum bio_merge_status bio_attempt_back_merge(struct request *req,
 955		struct bio *bio, unsigned int nr_segs)
 956{
 957	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
 958
 959	if (!ll_back_merge_fn(req, bio, nr_segs))
 960		return BIO_MERGE_FAILED;
 961
 962	trace_block_bio_backmerge(bio);
 963	rq_qos_merge(req->q, req, bio);
 964
 965	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 966		blk_rq_set_mixed_merge(req);
 967
 968	req->biotail->bi_next = bio;
 969	req->biotail = bio;
 970	req->__data_len += bio->bi_iter.bi_size;
 971
 972	bio_crypt_free_ctx(bio);
 973
 974	blk_account_io_merge_bio(req);
 975	return BIO_MERGE_OK;
 976}
 977
 978static enum bio_merge_status bio_attempt_front_merge(struct request *req,
 979		struct bio *bio, unsigned int nr_segs)
 980{
 981	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
 982
 983	if (!ll_front_merge_fn(req, bio, nr_segs))
 984		return BIO_MERGE_FAILED;
 985
 986	trace_block_bio_frontmerge(bio);
 987	rq_qos_merge(req->q, req, bio);
 988
 989	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 990		blk_rq_set_mixed_merge(req);
 991
 992	bio->bi_next = req->bio;
 993	req->bio = bio;
 994
 995	req->__sector = bio->bi_iter.bi_sector;
 996	req->__data_len += bio->bi_iter.bi_size;
 997
 998	bio_crypt_do_front_merge(req, bio);
 999
1000	blk_account_io_merge_bio(req);
1001	return BIO_MERGE_OK;
1002}
1003
1004static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1005		struct request *req, struct bio *bio)
1006{
1007	unsigned short segments = blk_rq_nr_discard_segments(req);
1008
1009	if (segments >= queue_max_discard_segments(q))
1010		goto no_merge;
1011	if (blk_rq_sectors(req) + bio_sectors(bio) >
1012	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1013		goto no_merge;
1014
1015	rq_qos_merge(q, req, bio);
1016
1017	req->biotail->bi_next = bio;
1018	req->biotail = bio;
1019	req->__data_len += bio->bi_iter.bi_size;
1020	req->nr_phys_segments = segments + 1;
1021
1022	blk_account_io_merge_bio(req);
1023	return BIO_MERGE_OK;
1024no_merge:
1025	req_set_nomerge(q, req);
1026	return BIO_MERGE_FAILED;
1027}
1028
1029static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1030						   struct request *rq,
1031						   struct bio *bio,
1032						   unsigned int nr_segs,
1033						   bool sched_allow_merge)
1034{
1035	if (!blk_rq_merge_ok(rq, bio))
1036		return BIO_MERGE_NONE;
1037
1038	switch (blk_try_merge(rq, bio)) {
1039	case ELEVATOR_BACK_MERGE:
1040		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1041			return bio_attempt_back_merge(rq, bio, nr_segs);
1042		break;
1043	case ELEVATOR_FRONT_MERGE:
1044		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1045			return bio_attempt_front_merge(rq, bio, nr_segs);
1046		break;
1047	case ELEVATOR_DISCARD_MERGE:
1048		return bio_attempt_discard_merge(q, rq, bio);
1049	default:
1050		return BIO_MERGE_NONE;
1051	}
1052
1053	return BIO_MERGE_FAILED;
1054}
1055
1056/**
1057 * blk_attempt_plug_merge - try to merge with %current's plugged list
1058 * @q: request_queue new bio is being queued at
1059 * @bio: new bio being queued
1060 * @nr_segs: number of segments in @bio
1061 * from the passed in @q already in the plug list
1062 *
1063 * Determine whether @bio being queued on @q can be merged with the previous
1064 * request on %current's plugged list.  Returns %true if merge was successful,
1065 * otherwise %false.
1066 *
1067 * Plugging coalesces IOs from the same issuer for the same purpose without
1068 * going through @q->queue_lock.  As such it's more of an issuing mechanism
1069 * than scheduling, and the request, while it may have elvpriv data, is not
1070 * added to the elevator at this point.  In addition, we don't have
1071 * reliable access to the elevator outside queue lock.  Only check basic
1072 * merging parameters without querying the elevator.
1073 *
1074 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1075 */
1076bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1077		unsigned int nr_segs)
1078{
1079	struct blk_plug *plug;
1080	struct request *rq;
1081
1082	plug = blk_mq_plug(bio);
1083	if (!plug || rq_list_empty(plug->mq_list))
1084		return false;
1085
1086	rq_list_for_each(&plug->mq_list, rq) {
1087		if (rq->q == q) {
1088			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1089			    BIO_MERGE_OK)
1090				return true;
1091			break;
1092		}
1093
1094		/*
1095		 * Only keep iterating plug list for merges if we have multiple
1096		 * queues
1097		 */
1098		if (!plug->multiple_queues)
1099			break;
1100	}
1101	return false;
1102}
1103
1104/*
1105 * Iterate list of requests and see if we can merge this bio with any
1106 * of them.
1107 */
1108bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1109			struct bio *bio, unsigned int nr_segs)
1110{
1111	struct request *rq;
1112	int checked = 8;
1113
1114	list_for_each_entry_reverse(rq, list, queuelist) {
1115		if (!checked--)
1116			break;
1117
1118		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1119		case BIO_MERGE_NONE:
1120			continue;
1121		case BIO_MERGE_OK:
1122			return true;
1123		case BIO_MERGE_FAILED:
1124			return false;
1125		}
1126
1127	}
1128
1129	return false;
1130}
1131EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1132
1133bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1134		unsigned int nr_segs, struct request **merged_request)
1135{
1136	struct request *rq;
1137
1138	switch (elv_merge(q, &rq, bio)) {
1139	case ELEVATOR_BACK_MERGE:
1140		if (!blk_mq_sched_allow_merge(q, rq, bio))
1141			return false;
1142		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1143			return false;
1144		*merged_request = attempt_back_merge(q, rq);
1145		if (!*merged_request)
1146			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1147		return true;
1148	case ELEVATOR_FRONT_MERGE:
1149		if (!blk_mq_sched_allow_merge(q, rq, bio))
1150			return false;
1151		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1152			return false;
1153		*merged_request = attempt_front_merge(q, rq);
1154		if (!*merged_request)
1155			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1156		return true;
1157	case ELEVATOR_DISCARD_MERGE:
1158		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1159	default:
1160		return false;
1161	}
1162}
1163EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
v3.5.6
  1/*
  2 * Functions related to segment and merge handling
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/bio.h>
  7#include <linux/blkdev.h>
  8#include <linux/scatterlist.h>
  9
 10#include "blk.h"
 11
 12static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 13					     struct bio *bio)
 14{
 15	struct bio_vec *bv, *bvprv = NULL;
 16	int cluster, i, high, highprv = 1;
 17	unsigned int seg_size, nr_phys_segs;
 18	struct bio *fbio, *bbio;
 19
 20	if (!bio)
 21		return 0;
 22
 23	fbio = bio;
 24	cluster = blk_queue_cluster(q);
 25	seg_size = 0;
 26	nr_phys_segs = 0;
 27	for_each_bio(bio) {
 28		bio_for_each_segment(bv, bio, i) {
 29			/*
 30			 * the trick here is making sure that a high page is
 31			 * never considered part of another segment, since that
 32			 * might change with the bounce page.
 33			 */
 34			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 35			if (high || highprv)
 36				goto new_segment;
 37			if (cluster) {
 38				if (seg_size + bv->bv_len
 39				    > queue_max_segment_size(q))
 40					goto new_segment;
 41				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 42					goto new_segment;
 43				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
 44					goto new_segment;
 45
 46				seg_size += bv->bv_len;
 47				bvprv = bv;
 48				continue;
 49			}
 50new_segment:
 51			if (nr_phys_segs == 1 && seg_size >
 52			    fbio->bi_seg_front_size)
 53				fbio->bi_seg_front_size = seg_size;
 54
 55			nr_phys_segs++;
 56			bvprv = bv;
 57			seg_size = bv->bv_len;
 58			highprv = high;
 59		}
 60		bbio = bio;
 61	}
 62
 63	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
 64		fbio->bi_seg_front_size = seg_size;
 65	if (seg_size > bbio->bi_seg_back_size)
 66		bbio->bi_seg_back_size = seg_size;
 67
 68	return nr_phys_segs;
 69}
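/*
 * Illustrative example (hypothetical limits): with clustering enabled and a
 * 64 KiB max_segment_size, two adjacent 4 KiB bvecs that are physically
 * contiguous and stay within the segment boundary are counted as one 8 KiB
 * physical segment; if any of the checks fails, a new segment is started and
 * seg_size restarts from the current bvec.
 */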
 70
 71void blk_recalc_rq_segments(struct request *rq)
 72{
 73	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 74}
 75
 76void blk_recount_segments(struct request_queue *q, struct bio *bio)
 77{
 78	struct bio *nxt = bio->bi_next;
 79
 80	bio->bi_next = NULL;
 81	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
 82	bio->bi_next = nxt;
 83	bio->bi_flags |= (1 << BIO_SEG_VALID);
 84}
 85EXPORT_SYMBOL(blk_recount_segments);
 86
 87static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 88				   struct bio *nxt)
 89{
 90	if (!blk_queue_cluster(q))
 91		return 0;
 92
 93	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
 94	    queue_max_segment_size(q))
 95		return 0;
 96
 97	if (!bio_has_data(bio))
 98		return 1;
 99
100	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
101		return 0;
102
103	/*
104	 * bio and nxt are contiguous in memory; check if the queue allows
105	 * these two to be merged into one
106	 */
107	if (BIO_SEG_BOUNDARY(q, bio, nxt))
108		return 1;
109
110	return 0;
111}
112
113/*
114 * map a request to scatterlist, return number of sg entries setup. Caller
115 * must make sure sg can hold rq->nr_phys_segments entries
116 */
117int blk_rq_map_sg(struct request_queue *q, struct request *rq,
118		  struct scatterlist *sglist)
119{
120	struct bio_vec *bvec, *bvprv;
121	struct req_iterator iter;
122	struct scatterlist *sg;
123	int nsegs, cluster;
124
125	nsegs = 0;
126	cluster = blk_queue_cluster(q);
127
128	/*
129	 * for each bio in rq
130	 */
131	bvprv = NULL;
132	sg = NULL;
133	rq_for_each_segment(bvec, rq, iter) {
134		int nbytes = bvec->bv_len;
135
136		if (bvprv && cluster) {
137			if (sg->length + nbytes > queue_max_segment_size(q))
138				goto new_segment;
139
140			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
141				goto new_segment;
142			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
143				goto new_segment;
144
145			sg->length += nbytes;
146		} else {
147new_segment:
148			if (!sg)
149				sg = sglist;
150			else {
151				/*
152				 * If the driver previously mapped a shorter
153				 * list, we could see a termination bit
154				 * prematurely unless it fully inits the sg
155				 * table on each mapping. We KNOW that there
156				 * must be more entries here or the driver
157				 * would be buggy, so force clear the
158				 * termination bit to avoid doing a full
159				 * sg_init_table() in drivers for each command.
160				 */
161				sg->page_link &= ~0x02;
162				sg = sg_next(sg);
163			}
164
165			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
166			nsegs++;
167		}
168		bvprv = bvec;
169	} /* segments in rq */
170
171
172	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
173	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
174		unsigned int pad_len =
175			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
176
177		sg->length += pad_len;
178		rq->extra_len += pad_len;
179	}
180
181	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
182		if (rq->cmd_flags & REQ_WRITE)
183			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
184
185		sg->page_link &= ~0x02;
186		sg = sg_next(sg);
187		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
188			    q->dma_drain_size,
189			    ((unsigned long)q->dma_drain_buffer) &
190			    (PAGE_SIZE - 1));
191		nsegs++;
192		rq->extra_len += q->dma_drain_size;
193	}
194
195	if (sg)
196		sg_mark_end(sg);
197
198	return nsegs;
199}
200EXPORT_SYMBOL(blk_rq_map_sg);
201
202static inline int ll_new_hw_segment(struct request_queue *q,
203				    struct request *req,
204				    struct bio *bio)
205{
206	int nr_phys_segs = bio_phys_segments(q, bio);
207
208	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
209		goto no_merge;
210
211	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
212		goto no_merge;
213
214	/*
215	 * This will form the start of a new hw segment.  Bump both
216	 * counters.
217	 */
218	req->nr_phys_segments += nr_phys_segs;
219	return 1;
220
221no_merge:
222	req->cmd_flags |= REQ_NOMERGE;
223	if (req == q->last_merge)
224		q->last_merge = NULL;
225	return 0;
226}
227
228int ll_back_merge_fn(struct request_queue *q, struct request *req,
229		     struct bio *bio)
230{
231	unsigned short max_sectors;
232
233	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
234		max_sectors = queue_max_hw_sectors(q);
235	else
236		max_sectors = queue_max_sectors(q);
237
238	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
239		req->cmd_flags |= REQ_NOMERGE;
240		if (req == q->last_merge)
241			q->last_merge = NULL;
242		return 0;
243	}
244	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
245		blk_recount_segments(q, req->biotail);
246	if (!bio_flagged(bio, BIO_SEG_VALID))
247		blk_recount_segments(q, bio);
248
249	return ll_new_hw_segment(q, req, bio);
250}
251
252int ll_front_merge_fn(struct request_queue *q, struct request *req,
253		      struct bio *bio)
254{
255	unsigned short max_sectors;
256
257	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
258		max_sectors = queue_max_hw_sectors(q);
259	else
260		max_sectors = queue_max_sectors(q);
261
262
263	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
264		req->cmd_flags |= REQ_NOMERGE;
265		if (req == q->last_merge)
266			q->last_merge = NULL;
267		return 0;
268	}
269	if (!bio_flagged(bio, BIO_SEG_VALID))
270		blk_recount_segments(q, bio);
271	if (!bio_flagged(req->bio, BIO_SEG_VALID))
272		blk_recount_segments(q, req->bio);
273
274	return ll_new_hw_segment(q, req, bio);
275}
276
277static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
278				struct request *next)
279{
280	int total_phys_segments;
281	unsigned int seg_size =
282		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
283
284	/*
285	 * First check if the either of the requests are re-queued
286	 * requests.  Can't merge them if they are.
287	 */
288	if (req->special || next->special)
289		return 0;
290
291	/*
292	 * Will it become too large?
293	 */
294	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
295		return 0;
296
297	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
298	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
299		if (req->nr_phys_segments == 1)
300			req->bio->bi_seg_front_size = seg_size;
301		if (next->nr_phys_segments == 1)
302			next->biotail->bi_seg_back_size = seg_size;
303		total_phys_segments--;
304	}
305
306	if (total_phys_segments > queue_max_segments(q))
307		return 0;
308
309	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
310		return 0;
311
312	/* Merge is OK... */
313	req->nr_phys_segments = total_phys_segments;
314	return 1;
315}
316
317/**
318 * blk_rq_set_mixed_merge - mark a request as mixed merge
319 * @rq: request to mark as mixed merge
320 *
321 * Description:
322 *     @rq is about to be mixed merged.  Make sure the attributes
323 *     which can be mixed are set in each bio and mark @rq as mixed
324 *     merged.
325 */
326void blk_rq_set_mixed_merge(struct request *rq)
327{
328	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
329	struct bio *bio;
330
331	if (rq->cmd_flags & REQ_MIXED_MERGE)
332		return;
333
334	/*
335	 * @rq will no longer represent mixable attributes for all the
336	 * contained bios.  It will just track those of the first one.
337	 * Distribute the attributes to each bio.
338	 */
339	for (bio = rq->bio; bio; bio = bio->bi_next) {
340		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
341			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
342		bio->bi_rw |= ff;
343	}
344	rq->cmd_flags |= REQ_MIXED_MERGE;
345}
346
347static void blk_account_io_merge(struct request *req)
348{
349	if (blk_do_io_stat(req)) {
350		struct hd_struct *part;
351		int cpu;
352
353		cpu = part_stat_lock();
354		part = req->part;
355
356		part_round_stats(cpu, part);
357		part_dec_in_flight(part, rq_data_dir(req));
358
359		hd_struct_put(part);
360		part_stat_unlock();
361	}
362}
363
364/*
365 * Has to be called with the request spinlock acquired
366 */
367static int attempt_merge(struct request_queue *q, struct request *req,
368			  struct request *next)
369{
370	if (!rq_mergeable(req) || !rq_mergeable(next))
371		return 0;
372
373	/*
374	 * Don't merge file system requests and discard requests
375	 */
376	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
377		return 0;
378
379	/*
380	 * Don't merge discard requests and secure discard requests
381	 */
382	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
383		return 0;
384
385	/*
386	 * not contiguous
387	 */
388	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
389		return 0;
390
391	if (rq_data_dir(req) != rq_data_dir(next)
392	    || req->rq_disk != next->rq_disk
393	    || next->special)
394		return 0;
395
396	/*
397	 * If we are allowed to merge, then append bio list
398	 * from next to rq and release next. merge_requests_fn
399	 * will have updated segment counts, update sector
400	 * counts here.
401	 */
402	if (!ll_merge_requests_fn(q, req, next))
403		return 0;
404
405	/*
406	 * If failfast settings disagree or any of the two is already
407	 * a mixed merge, mark both as mixed before proceeding.  This
408	 * makes sure that all involved bios have mixable attributes
409	 * set properly.
410	 */
411	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
412	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
413	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
414		blk_rq_set_mixed_merge(req);
415		blk_rq_set_mixed_merge(next);
416	}
417
418	/*
419	 * At this point we have either done a back merge
420	 * or front merge. We need the smaller start_time of
421	 * the merged requests to be the current request
422	 * for accounting purposes.
423	 */
424	if (time_after(req->start_time, next->start_time))
425		req->start_time = next->start_time;
426
427	req->biotail->bi_next = next->bio;
428	req->biotail = next->biotail;
429
430	req->__data_len += blk_rq_bytes(next);
431
432	elv_merge_requests(q, req, next);
433
434	/*
435	 * 'next' is going away, so update stats accordingly
436	 */
437	blk_account_io_merge(next);
438
439	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
440	if (blk_rq_cpu_valid(next))
441		req->cpu = next->cpu;
442
443	/* ownership of bio passed from next to req */
444	next->bio = NULL;
445	__blk_put_request(q, next);
446	return 1;
447}
448
449int attempt_back_merge(struct request_queue *q, struct request *rq)
450{
451	struct request *next = elv_latter_request(q, rq);
452
453	if (next)
454		return attempt_merge(q, rq, next);
455
456	return 0;
457}
458
459int attempt_front_merge(struct request_queue *q, struct request *rq)
460{
461	struct request *prev = elv_former_request(q, rq);
462
463	if (prev)
464		return attempt_merge(q, prev, rq);
465
466	return 0;
467}
468
469int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
470			  struct request *next)
471{
472	return attempt_merge(q, rq, next);
473}
474
475bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
476{
477	if (!rq_mergeable(rq))
478		return false;
479
480	/* don't merge file system requests and discard requests */
481	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
482		return false;
483
484	/* don't merge discard requests and secure discard requests */
485	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
486		return false;
487
488	/* different data direction or already started, don't merge */
489	if (bio_data_dir(bio) != rq_data_dir(rq))
490		return false;
491
492	/* must be same device and not a special request */
493	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
494		return false;
495
496	/* only merge integrity protected bio into ditto rq */
497	if (bio_integrity(bio) != blk_integrity_rq(rq))
498		return false;
499
500	return true;
501}
502
503int blk_try_merge(struct request *rq, struct bio *bio)
504{
505	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
506		return ELEVATOR_BACK_MERGE;
507	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
508		return ELEVATOR_FRONT_MERGE;
509	return ELEVATOR_NO_MERGE;
510}