   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   4 */
   5#include <linux/mm.h>
   6#include <linux/swap.h>
   7#include <linux/bio.h>
   8#include <linux/blkdev.h>
   9#include <linux/uio.h>
  10#include <linux/iocontext.h>
  11#include <linux/slab.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/export.h>
  15#include <linux/mempool.h>
  16#include <linux/workqueue.h>
  17#include <linux/cgroup.h>
  18#include <linux/blk-cgroup.h>
  19#include <linux/highmem.h>
  20
  21#include <trace/events/block.h>
  22#include "blk.h"
  23#include "blk-rq-qos.h"
  24
  25/*
  26 * Test patch to inline a certain number of bi_io_vec's inside the bio
  27 * itself, to shrink a bio data allocation from two mempool calls to one
  28 */
  29#define BIO_INLINE_VECS		4
  30
  31/*
  32 * if you change this list, also change bvec_alloc or things will
  33 * break badly! cannot be bigger than what you can fit into an
  34 * unsigned short
  35 */
  36#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  37static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  38	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
  39};
  40#undef BV
  41
  42/*
  43 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  44 * IO code that does not need private memory pools.
  45 */
  46struct bio_set fs_bio_set;
  47EXPORT_SYMBOL(fs_bio_set);
  48
  49/*
  50 * Our slab pool management
  51 */
  52struct bio_slab {
  53	struct kmem_cache *slab;
  54	unsigned int slab_ref;
  55	unsigned int slab_size;
  56	char name[8];
  57};
  58static DEFINE_MUTEX(bio_slab_lock);
  59static struct bio_slab *bio_slabs;
  60static unsigned int bio_slab_nr, bio_slab_max;
  61
  62static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  63{
  64	unsigned int sz = sizeof(struct bio) + extra_size;
  65	struct kmem_cache *slab = NULL;
  66	struct bio_slab *bslab, *new_bio_slabs;
  67	unsigned int new_bio_slab_max;
  68	unsigned int i, entry = -1;
  69
  70	mutex_lock(&bio_slab_lock);
  71
  72	i = 0;
  73	while (i < bio_slab_nr) {
  74		bslab = &bio_slabs[i];
  75
  76		if (!bslab->slab && entry == -1)
  77			entry = i;
  78		else if (bslab->slab_size == sz) {
  79			slab = bslab->slab;
  80			bslab->slab_ref++;
  81			break;
  82		}
  83		i++;
  84	}
  85
  86	if (slab)
  87		goto out_unlock;
  88
  89	if (bio_slab_nr == bio_slab_max && entry == -1) {
  90		new_bio_slab_max = bio_slab_max << 1;
  91		new_bio_slabs = krealloc(bio_slabs,
  92					 new_bio_slab_max * sizeof(struct bio_slab),
  93					 GFP_KERNEL);
  94		if (!new_bio_slabs)
  95			goto out_unlock;
  96		bio_slab_max = new_bio_slab_max;
  97		bio_slabs = new_bio_slabs;
  98	}
  99	if (entry == -1)
 100		entry = bio_slab_nr++;
 101
 102	bslab = &bio_slabs[entry];
 103
 104	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
 105	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
 106				 SLAB_HWCACHE_ALIGN, NULL);
 107	if (!slab)
 108		goto out_unlock;
 109
 110	bslab->slab = slab;
 111	bslab->slab_ref = 1;
 112	bslab->slab_size = sz;
 113out_unlock:
 114	mutex_unlock(&bio_slab_lock);
 115	return slab;
 116}
 117
 118static void bio_put_slab(struct bio_set *bs)
 119{
 120	struct bio_slab *bslab = NULL;
 121	unsigned int i;
 122
 123	mutex_lock(&bio_slab_lock);
 124
 125	for (i = 0; i < bio_slab_nr; i++) {
 126		if (bs->bio_slab == bio_slabs[i].slab) {
 127			bslab = &bio_slabs[i];
 128			break;
 129		}
 130	}
 131
 132	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 133		goto out;
 134
 135	WARN_ON(!bslab->slab_ref);
 136
 137	if (--bslab->slab_ref)
 138		goto out;
 139
 140	kmem_cache_destroy(bslab->slab);
 141	bslab->slab = NULL;
 142
 143out:
 144	mutex_unlock(&bio_slab_lock);
 145}
 146
 147unsigned int bvec_nr_vecs(unsigned short idx)
 148{
 149	return bvec_slabs[--idx].nr_vecs;
 150}
 151
 152void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 153{
 154	if (!idx)
 155		return;
 156	idx--;
 157
 158	BIO_BUG_ON(idx >= BVEC_POOL_NR);
 159
 160	if (idx == BVEC_POOL_MAX) {
 161		mempool_free(bv, pool);
 162	} else {
 163		struct biovec_slab *bvs = bvec_slabs + idx;
 164
 165		kmem_cache_free(bvs->slab, bv);
 166	}
 167}
 168
 169struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 170			   mempool_t *pool)
 171{
 172	struct bio_vec *bvl;
 173
 174	/*
  175	 * see the comment near the bvec_slabs definition!
 176	 */
 177	switch (nr) {
 178	case 1:
 179		*idx = 0;
 180		break;
 181	case 2 ... 4:
 182		*idx = 1;
 183		break;
 184	case 5 ... 16:
 185		*idx = 2;
 186		break;
 187	case 17 ... 64:
 188		*idx = 3;
 189		break;
 190	case 65 ... 128:
 191		*idx = 4;
 192		break;
 193	case 129 ... BIO_MAX_PAGES:
 194		*idx = 5;
 195		break;
 196	default:
 197		return NULL;
 198	}
 199
 200	/*
  201	 * idx now points to the pool we want to allocate from. Only the largest
  202	 * (BVEC_POOL_MAX) pool is mempool backed.
 203	 */
 204	if (*idx == BVEC_POOL_MAX) {
 205fallback:
 206		bvl = mempool_alloc(pool, gfp_mask);
 207	} else {
 208		struct biovec_slab *bvs = bvec_slabs + *idx;
 209		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
 210
 211		/*
 212		 * Make this allocation restricted and don't dump info on
 213		 * allocation failures, since we'll fallback to the mempool
 214		 * in case of failure.
 215		 */
 216		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 217
 218		/*
 219		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
  220		 * is set, retry with the BVEC_POOL_MAX mempool
 221		 */
 222		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
 223		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
 224			*idx = BVEC_POOL_MAX;
 225			goto fallback;
 226		}
 227	}
 228
 229	(*idx)++;
 230	return bvl;
 231}
 232
 233void bio_uninit(struct bio *bio)
 234{
 235	bio_disassociate_blkg(bio);
 236}
 237EXPORT_SYMBOL(bio_uninit);
 238
 239static void bio_free(struct bio *bio)
 240{
 241	struct bio_set *bs = bio->bi_pool;
 242	void *p;
 243
 244	bio_uninit(bio);
 245
 246	if (bs) {
 247		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
 248
 249		/*
 250		 * If we have front padding, adjust the bio pointer before freeing
 251		 */
 252		p = bio;
 253		p -= bs->front_pad;
 254
 255		mempool_free(p, &bs->bio_pool);
 256	} else {
 257		/* Bio was allocated by bio_kmalloc() */
 258		kfree(bio);
 259	}
 260}
 261
 262/*
 263 * Users of this function have their own bio allocation. Subsequently,
 264 * they must remember to pair any call to bio_init() with bio_uninit()
 265 * when IO has completed, or when the bio is released.
 266 */
 267void bio_init(struct bio *bio, struct bio_vec *table,
 268	      unsigned short max_vecs)
 269{
 270	memset(bio, 0, sizeof(*bio));
 271	atomic_set(&bio->__bi_remaining, 1);
 272	atomic_set(&bio->__bi_cnt, 1);
 273
 274	bio->bi_io_vec = table;
 275	bio->bi_max_vecs = max_vecs;
 276}
 277EXPORT_SYMBOL(bio_init);
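
/*
 * Example (illustrative sketch, not part of the original file): a caller that
 * puts a bio on the stack with a single inline bvec and pairs bio_init() with
 * bio_uninit() once the I/O is done. "bdev", "page" and "sector" are
 * hypothetical.
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *
 *	if (submit_bio_wait(&bio))
 *		pr_warn("read failed\n");
 *	bio_uninit(&bio);
 */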
 278
 279/**
 280 * bio_reset - reinitialize a bio
 281 * @bio:	bio to reset
 282 *
 283 * Description:
 284 *   After calling bio_reset(), @bio will be in the same state as a freshly
  285 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 286 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 287 *   comment in struct bio.
 288 */
 289void bio_reset(struct bio *bio)
 290{
 291	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 292
 293	bio_uninit(bio);
 294
 295	memset(bio, 0, BIO_RESET_BYTES);
 296	bio->bi_flags = flags;
 297	atomic_set(&bio->__bi_remaining, 1);
 298}
 299EXPORT_SYMBOL(bio_reset);
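
/*
 * Example (illustrative sketch, not part of the original file): reusing one
 * bio for a series of synchronous writes instead of reallocating it for each
 * chunk. "bdev", "page" and "sector" are hypothetical.
 *
 *	bio_reset(bio);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 */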
 300
 301static struct bio *__bio_chain_endio(struct bio *bio)
 302{
 303	struct bio *parent = bio->bi_private;
 304
 305	if (!parent->bi_status)
 306		parent->bi_status = bio->bi_status;
 307	bio_put(bio);
 308	return parent;
 309}
 310
 311static void bio_chain_endio(struct bio *bio)
 312{
 313	bio_endio(__bio_chain_endio(bio));
 314}
 315
 316/**
 317 * bio_chain - chain bio completions
 318 * @bio: the target bio
 319 * @parent: the @bio's parent bio
 320 *
 321 * The caller won't have a bi_end_io called when @bio completes - instead,
 322 * @parent's bi_end_io won't be called until both @parent and @bio have
 323 * completed; the chained bio will also be freed when it completes.
 324 *
 325 * The caller must not set bi_private or bi_end_io in @bio.
 326 */
 327void bio_chain(struct bio *bio, struct bio *parent)
 328{
 329	BUG_ON(bio->bi_private || bio->bi_end_io);
 330
 331	bio->bi_private = parent;
 332	bio->bi_end_io	= bio_chain_endio;
 333	bio_inc_remaining(parent);
 334}
 335EXPORT_SYMBOL(bio_chain);
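
/*
 * Example (illustrative sketch, not part of the original file): issuing a
 * preflush that the parent bio must wait for. The chained bio is freed when it
 * completes; the parent's bi_end_io runs only after both have completed.
 * "bdev" and "parent" are hypothetical.
 *
 *	struct bio *flush = bio_alloc(GFP_NOIO, 0);
 *
 *	bio_set_dev(flush, bdev);
 *	flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 *	bio_chain(flush, parent);
 *	submit_bio(flush);
 *	submit_bio(parent);
 */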
 336
 337static void bio_alloc_rescue(struct work_struct *work)
 338{
 339	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 340	struct bio *bio;
 341
 342	while (1) {
 343		spin_lock(&bs->rescue_lock);
 344		bio = bio_list_pop(&bs->rescue_list);
 345		spin_unlock(&bs->rescue_lock);
 346
 347		if (!bio)
 348			break;
 349
 350		generic_make_request(bio);
 351	}
 352}
 353
 354static void punt_bios_to_rescuer(struct bio_set *bs)
 355{
 356	struct bio_list punt, nopunt;
 357	struct bio *bio;
 358
 359	if (WARN_ON_ONCE(!bs->rescue_workqueue))
 360		return;
 361	/*
 362	 * In order to guarantee forward progress we must punt only bios that
 363	 * were allocated from this bio_set; otherwise, if there was a bio on
 364	 * there for a stacking driver higher up in the stack, processing it
 365	 * could require allocating bios from this bio_set, and doing that from
 366	 * our own rescuer would be bad.
 367	 *
 368	 * Since bio lists are singly linked, pop them all instead of trying to
 369	 * remove from the middle of the list:
 370	 */
 371
 372	bio_list_init(&punt);
 373	bio_list_init(&nopunt);
 374
 375	while ((bio = bio_list_pop(&current->bio_list[0])))
 376		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 377	current->bio_list[0] = nopunt;
 378
 379	bio_list_init(&nopunt);
 380	while ((bio = bio_list_pop(&current->bio_list[1])))
 381		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 382	current->bio_list[1] = nopunt;
 383
 384	spin_lock(&bs->rescue_lock);
 385	bio_list_merge(&bs->rescue_list, &punt);
 386	spin_unlock(&bs->rescue_lock);
 387
 388	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 389}
 390
 391/**
 392 * bio_alloc_bioset - allocate a bio for I/O
 393 * @gfp_mask:   the GFP_* mask given to the slab allocator
 394 * @nr_iovecs:	number of iovecs to pre-allocate
 395 * @bs:		the bio_set to allocate from.
 396 *
 397 * Description:
 398 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 399 *   backed by the @bs's mempool.
 400 *
 401 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 402 *   always be able to allocate a bio. This is due to the mempool guarantees.
 403 *   To make this work, callers must never allocate more than 1 bio at a time
 404 *   from this pool. Callers that need to allocate more than 1 bio must always
 405 *   submit the previously allocated bio for IO before attempting to allocate
 406 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 407 *
 408 *   Note that when running under generic_make_request() (i.e. any block
 409 *   driver), bios are not submitted until after you return - see the code in
 410 *   generic_make_request() that converts recursion into iteration, to prevent
 411 *   stack overflows.
 412 *
 413 *   This would normally mean allocating multiple bios under
 414 *   generic_make_request() would be susceptible to deadlocks, but we have
 415 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 416 *   thread.
 417 *
 418 *   However, we do not guarantee forward progress for allocations from other
 419 *   mempools. Doing multiple allocations from the same mempool under
 420 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
  421 * for per-bio allocations.
 422 *
 423 *   RETURNS:
 424 *   Pointer to new bio on success, NULL on failure.
 425 */
 426struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 427			     struct bio_set *bs)
 428{
 429	gfp_t saved_gfp = gfp_mask;
 430	unsigned front_pad;
 431	unsigned inline_vecs;
 432	struct bio_vec *bvl = NULL;
 433	struct bio *bio;
 434	void *p;
 435
 436	if (!bs) {
 437		if (nr_iovecs > UIO_MAXIOV)
 438			return NULL;
 439
 440		p = kmalloc(sizeof(struct bio) +
 441			    nr_iovecs * sizeof(struct bio_vec),
 442			    gfp_mask);
 443		front_pad = 0;
 444		inline_vecs = nr_iovecs;
 445	} else {
 446		/* should not use nobvec bioset for nr_iovecs > 0 */
 447		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
 448				 nr_iovecs > 0))
 449			return NULL;
 450		/*
 451		 * generic_make_request() converts recursion to iteration; this
 452		 * means if we're running beneath it, any bios we allocate and
 453		 * submit will not be submitted (and thus freed) until after we
 454		 * return.
 455		 *
 456		 * This exposes us to a potential deadlock if we allocate
 457		 * multiple bios from the same bio_set() while running
 458		 * underneath generic_make_request(). If we were to allocate
 459		 * multiple bios (say a stacking block driver that was splitting
 460		 * bios), we would deadlock if we exhausted the mempool's
 461		 * reserve.
 462		 *
 463		 * We solve this, and guarantee forward progress, with a rescuer
 464		 * workqueue per bio_set. If we go to allocate and there are
 465		 * bios on current->bio_list, we first try the allocation
 466		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
 467		 * bios we would be blocking to the rescuer workqueue before
 468		 * we retry with the original gfp_flags.
 469		 */
 470
 471		if (current->bio_list &&
 472		    (!bio_list_empty(&current->bio_list[0]) ||
 473		     !bio_list_empty(&current->bio_list[1])) &&
 474		    bs->rescue_workqueue)
 475			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 476
 477		p = mempool_alloc(&bs->bio_pool, gfp_mask);
 478		if (!p && gfp_mask != saved_gfp) {
 479			punt_bios_to_rescuer(bs);
 480			gfp_mask = saved_gfp;
 481			p = mempool_alloc(&bs->bio_pool, gfp_mask);
 482		}
 483
 484		front_pad = bs->front_pad;
 485		inline_vecs = BIO_INLINE_VECS;
 486	}
 487
 488	if (unlikely(!p))
 489		return NULL;
 490
 491	bio = p + front_pad;
 492	bio_init(bio, NULL, 0);
 493
 494	if (nr_iovecs > inline_vecs) {
 495		unsigned long idx = 0;
 496
 497		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 498		if (!bvl && gfp_mask != saved_gfp) {
 499			punt_bios_to_rescuer(bs);
 500			gfp_mask = saved_gfp;
 501			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 502		}
 503
 504		if (unlikely(!bvl))
 505			goto err_free;
 506
 507		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
 508	} else if (nr_iovecs) {
 509		bvl = bio->bi_inline_vecs;
 510	}
 511
 512	bio->bi_pool = bs;
 513	bio->bi_max_vecs = nr_iovecs;
 514	bio->bi_io_vec = bvl;
 515	return bio;
 516
 517err_free:
 518	mempool_free(p, &bs->bio_pool);
 519	return NULL;
 520}
 521EXPORT_SYMBOL(bio_alloc_bioset);
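
/*
 * Example (illustrative sketch, not part of the original file): allocating
 * from a private bio_set with a reclaim-capable mask, which cannot fail as
 * long as the caller submits each bio before allocating the next one.
 * "my_bio_set", "nr_vecs" and "bdev" are hypothetical.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_WRITE;
 *	... add pages, set bi_iter.bi_sector and bi_end_io ...
 *	submit_bio(bio);
 */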
 522
 523void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 524{
 525	unsigned long flags;
 526	struct bio_vec bv;
 527	struct bvec_iter iter;
 528
 529	__bio_for_each_segment(bv, bio, iter, start) {
 530		char *data = bvec_kmap_irq(&bv, &flags);
 531		memset(data, 0, bv.bv_len);
 532		flush_dcache_page(bv.bv_page);
 533		bvec_kunmap_irq(data, &flags);
 534	}
 535}
 536EXPORT_SYMBOL(zero_fill_bio_iter);
 537
 538/**
 539 * bio_put - release a reference to a bio
 540 * @bio:   bio to release reference to
 541 *
 542 * Description:
 543 *   Put a reference to a &struct bio, either one you have gotten with
 544 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 545 **/
 546void bio_put(struct bio *bio)
 547{
 548	if (!bio_flagged(bio, BIO_REFFED))
 549		bio_free(bio);
 550	else {
 551		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
 552
 553		/*
 554		 * last put frees it
 555		 */
 556		if (atomic_dec_and_test(&bio->__bi_cnt))
 557			bio_free(bio);
 558	}
 559}
 560EXPORT_SYMBOL(bio_put);
 561
 562/**
 563 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 564 * 	@bio: destination bio
 565 * 	@bio_src: bio to clone
 566 *
 567 *	Clone a &bio. Caller will own the returned bio, but not
 568 *	the actual data it points to. Reference count of returned
 569 * 	bio will be one.
 570 *
 571 * 	Caller must ensure that @bio_src is not freed before @bio.
 572 */
 573void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 574{
 575	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 576
 577	/*
 578	 * most users will be overriding ->bi_disk with a new target,
 579	 * so we don't set nor calculate new physical/hw segment counts here
 580	 */
 581	bio->bi_disk = bio_src->bi_disk;
 582	bio->bi_partno = bio_src->bi_partno;
 583	bio_set_flag(bio, BIO_CLONED);
 584	if (bio_flagged(bio_src, BIO_THROTTLED))
 585		bio_set_flag(bio, BIO_THROTTLED);
 586	bio->bi_opf = bio_src->bi_opf;
 587	bio->bi_ioprio = bio_src->bi_ioprio;
 588	bio->bi_write_hint = bio_src->bi_write_hint;
 589	bio->bi_iter = bio_src->bi_iter;
 590	bio->bi_io_vec = bio_src->bi_io_vec;
 591
 592	bio_clone_blkg_association(bio, bio_src);
 593	blkcg_bio_issue_init(bio);
 594}
 595EXPORT_SYMBOL(__bio_clone_fast);
 596
 597/**
 598 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 599 *	@bio: bio to clone
 600 *	@gfp_mask: allocation priority
 601 *	@bs: bio_set to allocate from
 602 *
 603 * 	Like __bio_clone_fast, only also allocates the returned bio
 604 */
 605struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 606{
 607	struct bio *b;
 608
 609	b = bio_alloc_bioset(gfp_mask, 0, bs);
 610	if (!b)
 611		return NULL;
 612
 613	__bio_clone_fast(b, bio);
 614
 615	if (bio_integrity(bio)) {
 616		int ret;
 617
 618		ret = bio_integrity_clone(b, bio, gfp_mask);
 619
 620		if (ret < 0) {
 621			bio_put(b);
 622			return NULL;
 623		}
 624	}
 625
 626	return b;
 627}
 628EXPORT_SYMBOL(bio_clone_fast);
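
/*
 * Example (illustrative sketch, not part of the original file): a remapping
 * driver clones the incoming bio, points the clone at a backing device and
 * completes the original from the clone's end_io. "my_bio_set",
 * "backing_bdev" and "my_clone_endio" are hypothetical.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	bio_set_dev(clone, backing_bdev);
 *	clone->bi_private = bio;
 *	clone->bi_end_io = my_clone_endio;
 *	generic_make_request(clone);
 */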
 629
 630static inline bool page_is_mergeable(const struct bio_vec *bv,
 631		struct page *page, unsigned int len, unsigned int off,
 632		bool *same_page)
 633{
 634	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
 635		bv->bv_offset + bv->bv_len - 1;
 636	phys_addr_t page_addr = page_to_phys(page);
 637
 638	if (vec_end_addr + 1 != page_addr + off)
 639		return false;
 640	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
 641		return false;
 642
 643	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
 644	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
 645		return false;
 646	return true;
 647}
 648
 649static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
 650		struct page *page, unsigned len, unsigned offset,
 651		bool *same_page)
 652{
 653	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 654	unsigned long mask = queue_segment_boundary(q);
 655	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
 656	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 657
 658	if ((addr1 | mask) != (addr2 | mask))
 659		return false;
 660	if (bv->bv_len + len > queue_max_segment_size(q))
 661		return false;
 662	return __bio_try_merge_page(bio, page, len, offset, same_page);
 663}
 664
 665/**
 666 *	__bio_add_pc_page	- attempt to add page to passthrough bio
 667 *	@q: the target queue
 668 *	@bio: destination bio
 669 *	@page: page to add
 670 *	@len: vec entry length
 671 *	@offset: vec entry offset
  672 *	@same_page: return whether the merge happened inside the same page
 673 *
 674 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 675 *	number of reasons, such as the bio being full or target block device
  676 *	limitations. The target block device must allow bios up to PAGE_SIZE,
 677 *	so it is always possible to add a single page to an empty bio.
 678 *
 679 *	This should only be used by passthrough bios.
 680 */
 681static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 682		struct page *page, unsigned int len, unsigned int offset,
 683		bool *same_page)
 684{
 685	struct bio_vec *bvec;
 686
 687	/*
 688	 * cloned bio must not modify vec list
 689	 */
 690	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 691		return 0;
 692
 693	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 694		return 0;
 695
 696	if (bio->bi_vcnt > 0) {
 697		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
 698			return len;
 699
 700		/*
 701		 * If the queue doesn't support SG gaps and adding this segment
 702		 * would create a gap, disallow it.
 703		 */
 704		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
 705		if (bvec_gap_to_prev(q, bvec, offset))
 706			return 0;
 707	}
 708
 709	if (bio_full(bio, len))
 710		return 0;
 711
 712	if (bio->bi_vcnt >= queue_max_segments(q))
 713		return 0;
 714
 715	bvec = &bio->bi_io_vec[bio->bi_vcnt];
 716	bvec->bv_page = page;
 717	bvec->bv_len = len;
 718	bvec->bv_offset = offset;
 719	bio->bi_vcnt++;
 720	bio->bi_iter.bi_size += len;
 721	return len;
 722}
 723
 724int bio_add_pc_page(struct request_queue *q, struct bio *bio,
 725		struct page *page, unsigned int len, unsigned int offset)
 726{
 727	bool same_page = false;
 728	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
 729}
 730EXPORT_SYMBOL(bio_add_pc_page);
 731
 732/**
 733 * __bio_try_merge_page - try appending data to an existing bvec.
 734 * @bio: destination bio
 735 * @page: start page to add
 736 * @len: length of the data to add
 737 * @off: offset of the data relative to @page
 738 * @same_page: return if the segment has been merged inside the same page
 739 *
  740 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
  741 * useful optimisation for file systems with a block size smaller than the
  742 * page size.
  743 *
  744 * Warn if (@len, @off) crosses pages when @same_page is true.
 745 *
 746 * Return %true on success or %false on failure.
 747 */
 748bool __bio_try_merge_page(struct bio *bio, struct page *page,
 749		unsigned int len, unsigned int off, bool *same_page)
 750{
 751	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 752		return false;
 753
 754	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
 755		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 756
 757		if (page_is_mergeable(bv, page, len, off, same_page)) {
 758			bv->bv_len += len;
 759			bio->bi_iter.bi_size += len;
 760			return true;
 761		}
 762	}
 763	return false;
 764}
 765EXPORT_SYMBOL_GPL(__bio_try_merge_page);
 766
 767/**
 768 * __bio_add_page - add page(s) to a bio in a new segment
 769 * @bio: destination bio
 770 * @page: start page to add
 771 * @len: length of the data to add, may cross pages
 772 * @off: offset of the data relative to @page, may cross pages
 773 *
 774 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 775 * that @bio has space for another bvec.
 776 */
 777void __bio_add_page(struct bio *bio, struct page *page,
 778		unsigned int len, unsigned int off)
 779{
 780	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
 781
 782	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
 783	WARN_ON_ONCE(bio_full(bio, len));
 784
 785	bv->bv_page = page;
 786	bv->bv_offset = off;
 787	bv->bv_len = len;
 788
 789	bio->bi_iter.bi_size += len;
 790	bio->bi_vcnt++;
 791
 792	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
 793		bio_set_flag(bio, BIO_WORKINGSET);
 794}
 795EXPORT_SYMBOL_GPL(__bio_add_page);
 796
 797/**
 798 *	bio_add_page	-	attempt to add page(s) to bio
 799 *	@bio: destination bio
 800 *	@page: start page to add
 801 *	@len: vec entry length, may cross pages
 802 *	@offset: vec entry offset relative to @page, may cross pages
 803 *
 804 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
 805 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 806 */
 807int bio_add_page(struct bio *bio, struct page *page,
 808		 unsigned int len, unsigned int offset)
 809{
 810	bool same_page = false;
 811
 812	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
 813		if (bio_full(bio, len))
 814			return 0;
 815		__bio_add_page(bio, page, len, offset);
 816	}
 817	return len;
 818}
 819EXPORT_SYMBOL(bio_add_page);
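
/*
 * Example (illustrative sketch, not part of the original file): filling a bio
 * page by page and submitting it once bio_add_page() reports it is full.
 * "pages", "nr_pages" and the re-initialisation step are hypothetical.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		while (!bio_add_page(bio, pages[i], PAGE_SIZE, 0)) {
 *			submit_bio(bio);
 *			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 *			... re-initialise bi_disk, bi_opf and bi_iter.bi_sector ...
 *		}
 *	}
 *	submit_bio(bio);
 */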
 820
 821void bio_release_pages(struct bio *bio, bool mark_dirty)
 822{
 823	struct bvec_iter_all iter_all;
 824	struct bio_vec *bvec;
 825
 826	if (bio_flagged(bio, BIO_NO_PAGE_REF))
 827		return;
 828
 829	bio_for_each_segment_all(bvec, bio, iter_all) {
 830		if (mark_dirty && !PageCompound(bvec->bv_page))
 831			set_page_dirty_lock(bvec->bv_page);
 832		put_page(bvec->bv_page);
 833	}
 834}
 835
 836static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
 837{
 838	const struct bio_vec *bv = iter->bvec;
 839	unsigned int len;
 840	size_t size;
 841
 842	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
 843		return -EINVAL;
 844
 845	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
 846	size = bio_add_page(bio, bv->bv_page, len,
 847				bv->bv_offset + iter->iov_offset);
 848	if (unlikely(size != len))
 849		return -EINVAL;
 850	iov_iter_advance(iter, size);
 851	return 0;
 852}
 853
 854#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 855
 856/**
 857 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 858 * @bio: bio to add pages to
 859 * @iter: iov iterator describing the region to be mapped
 860 *
 861 * Pins pages from *iter and appends them to @bio's bvec array. The
 862 * pages will have to be released using put_page() when done.
  863 * For multi-segment *iter, this function only adds pages from the
  864 * next non-empty segment of the iov iterator.
 865 */
 866static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 867{
 868	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
 869	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
 870	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 871	struct page **pages = (struct page **)bv;
 872	bool same_page = false;
 873	ssize_t size, left;
 874	unsigned len, i;
 875	size_t offset;
 876
 877	/*
 878	 * Move page array up in the allocated memory for the bio vecs as far as
 879	 * possible so that we can start filling biovecs from the beginning
 880	 * without overwriting the temporary page array.
  881	 */
 882	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
 883	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
 884
 885	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 886	if (unlikely(size <= 0))
 887		return size ? size : -EFAULT;
 888
 889	for (left = size, i = 0; left > 0; left -= len, i++) {
 890		struct page *page = pages[i];
 891
 892		len = min_t(size_t, PAGE_SIZE - offset, left);
 893
 894		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
 895			if (same_page)
 896				put_page(page);
 897		} else {
 898			if (WARN_ON_ONCE(bio_full(bio, len)))
  899				return -EINVAL;
 900			__bio_add_page(bio, page, len, offset);
 901		}
 902		offset = 0;
 903	}
 904
 905	iov_iter_advance(iter, size);
 906	return 0;
 907}
 908
 909/**
 910 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 911 * @bio: bio to add pages to
 912 * @iter: iov iterator describing the region to be added
 913 *
 914 * This takes either an iterator pointing to user memory, or one pointing to
 915 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 916 * map them into the kernel. On IO completion, the caller should put those
 917 * pages. If we're adding kernel pages, and the caller told us it's safe to
 918 * do so, we just have to add the pages to the bio directly. We don't grab an
 919 * extra reference to those pages (the user should already have that), and we
 920 * don't put the page on IO completion. The caller needs to check if the bio is
 921 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
 922 * released.
 923 *
 924 * The function tries, but does not guarantee, to pin as many pages as
  925 * fit into the bio, or are requested in *iter, whichever is smaller. If
  926 * the MM encounters an error while pinning the requested pages, it stops.
  927 * An error is returned only if no pages could be pinned.
 928 */
 929int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 930{
 931	const bool is_bvec = iov_iter_is_bvec(iter);
 932	int ret;
 933
 934	if (WARN_ON_ONCE(bio->bi_vcnt))
 935		return -EINVAL;
 936
 937	do {
 938		if (is_bvec)
 939			ret = __bio_iov_bvec_add_pages(bio, iter);
 940		else
 941			ret = __bio_iov_iter_get_pages(bio, iter);
 942	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
 943
 944	if (is_bvec)
 945		bio_set_flag(bio, BIO_NO_PAGE_REF);
 946	return bio->bi_vcnt ? 0 : ret;
 947}
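
/*
 * Example (illustrative sketch, not part of the original file): a direct-I/O
 * read path pulling user pages out of an iov_iter into a freshly allocated
 * bio. "iter", "bdev" and "pos" are hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL,
 *				    iov_iter_npages(iter, BIO_MAX_PAGES));
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = pos >> 9;
 *	bio->bi_opf = REQ_OP_READ;
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 *	submit_bio(bio);
 */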
 948
 949static void submit_bio_wait_endio(struct bio *bio)
 950{
 951	complete(bio->bi_private);
 952}
 953
 954/**
 955 * submit_bio_wait - submit a bio, and wait until it completes
 956 * @bio: The &struct bio which describes the I/O
 957 *
 958 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 959 * bio_endio() on failure.
 960 *
  961 * WARNING: Unlike how submit_bio() is usually used, this function does not
  962 * consume the bio reference. The caller must drop the reference
  963 * on their own.
 964 */
 965int submit_bio_wait(struct bio *bio)
 966{
 967	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
 968
 969	bio->bi_private = &done;
 970	bio->bi_end_io = submit_bio_wait_endio;
 971	bio->bi_opf |= REQ_SYNC;
 972	submit_bio(bio);
 973	wait_for_completion_io(&done);
 974
 975	return blk_status_to_errno(bio->bi_status);
 976}
 977EXPORT_SYMBOL(submit_bio_wait);
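
/*
 * Example (illustrative sketch, not part of the original file): issuing a
 * synchronous cache flush. Note the explicit bio_put() - unlike plain
 * submit_bio(), this helper does not consume the reference. "bdev" is
 * hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */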
 978
 979/**
 980 * bio_advance - increment/complete a bio by some number of bytes
 981 * @bio:	bio to advance
 982 * @bytes:	number of bytes to complete
 983 *
 984 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 985 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 986 * be updated on the last bvec as well.
 987 *
 988 * @bio will then represent the remaining, uncompleted portion of the io.
 989 */
 990void bio_advance(struct bio *bio, unsigned bytes)
 991{
 992	if (bio_integrity(bio))
 993		bio_integrity_advance(bio, bytes);
 994
 995	bio_advance_iter(bio, &bio->bi_iter, bytes);
 996}
 997EXPORT_SYMBOL(bio_advance);
 998
 999void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1000			struct bio *src, struct bvec_iter *src_iter)
1001{
1002	struct bio_vec src_bv, dst_bv;
1003	void *src_p, *dst_p;
1004	unsigned bytes;
1005
1006	while (src_iter->bi_size && dst_iter->bi_size) {
1007		src_bv = bio_iter_iovec(src, *src_iter);
1008		dst_bv = bio_iter_iovec(dst, *dst_iter);
1009
1010		bytes = min(src_bv.bv_len, dst_bv.bv_len);
1011
1012		src_p = kmap_atomic(src_bv.bv_page);
1013		dst_p = kmap_atomic(dst_bv.bv_page);
1014
1015		memcpy(dst_p + dst_bv.bv_offset,
1016		       src_p + src_bv.bv_offset,
1017		       bytes);
1018
1019		kunmap_atomic(dst_p);
1020		kunmap_atomic(src_p);
1021
1022		flush_dcache_page(dst_bv.bv_page);
1023
1024		bio_advance_iter(src, src_iter, bytes);
1025		bio_advance_iter(dst, dst_iter, bytes);
1026	}
1027}
1028EXPORT_SYMBOL(bio_copy_data_iter);
1029
1030/**
1031 * bio_copy_data - copy contents of data buffers from one bio to another
1032 * @src: source bio
1033 * @dst: destination bio
1034 *
1035 * Stops when it reaches the end of either @src or @dst - that is, copies
1036 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1037 */
1038void bio_copy_data(struct bio *dst, struct bio *src)
1039{
1040	struct bvec_iter src_iter = src->bi_iter;
1041	struct bvec_iter dst_iter = dst->bi_iter;
1042
1043	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1044}
1045EXPORT_SYMBOL(bio_copy_data);
1046
1047/**
1048 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
1049 * another
1050 * @src: source bio list
1051 * @dst: destination bio list
1052 *
1053 * Stops when it reaches the end of either the @src list or @dst list - that is,
1054 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
1055 * bios).
1056 */
1057void bio_list_copy_data(struct bio *dst, struct bio *src)
1058{
1059	struct bvec_iter src_iter = src->bi_iter;
1060	struct bvec_iter dst_iter = dst->bi_iter;
1061
1062	while (1) {
1063		if (!src_iter.bi_size) {
1064			src = src->bi_next;
1065			if (!src)
1066				break;
1067
1068			src_iter = src->bi_iter;
1069		}
1070
1071		if (!dst_iter.bi_size) {
1072			dst = dst->bi_next;
1073			if (!dst)
1074				break;
1075
1076			dst_iter = dst->bi_iter;
1077		}
1078
1079		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1080	}
1081}
1082EXPORT_SYMBOL(bio_list_copy_data);
1083
1084struct bio_map_data {
1085	int is_our_pages;
1086	struct iov_iter iter;
1087	struct iovec iov[];
1088};
1089
1090static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1091					       gfp_t gfp_mask)
1092{
1093	struct bio_map_data *bmd;
1094	if (data->nr_segs > UIO_MAXIOV)
1095		return NULL;
1096
1097	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
1098	if (!bmd)
1099		return NULL;
1100	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
1101	bmd->iter = *data;
1102	bmd->iter.iov = bmd->iov;
1103	return bmd;
1104}
1105
1106/**
1107 * bio_copy_from_iter - copy all pages from iov_iter to bio
1108 * @bio: The &struct bio which describes the I/O as destination
1109 * @iter: iov_iter as source
1110 *
1111 * Copy all pages from iov_iter to bio.
1112 * Returns 0 on success, or error on failure.
1113 */
1114static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1115{
1116	struct bio_vec *bvec;
1117	struct bvec_iter_all iter_all;
1118
1119	bio_for_each_segment_all(bvec, bio, iter_all) {
1120		ssize_t ret;
1121
1122		ret = copy_page_from_iter(bvec->bv_page,
1123					  bvec->bv_offset,
1124					  bvec->bv_len,
1125					  iter);
1126
1127		if (!iov_iter_count(iter))
1128			break;
1129
1130		if (ret < bvec->bv_len)
1131			return -EFAULT;
1132	}
1133
1134	return 0;
1135}
1136
1137/**
1138 * bio_copy_to_iter - copy all pages from bio to iov_iter
1139 * @bio: The &struct bio which describes the I/O as source
1140 * @iter: iov_iter as destination
1141 *
1142 * Copy all pages from bio to iov_iter.
1143 * Returns 0 on success, or error on failure.
1144 */
1145static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1146{
1147	struct bio_vec *bvec;
1148	struct bvec_iter_all iter_all;
1149
1150	bio_for_each_segment_all(bvec, bio, iter_all) {
1151		ssize_t ret;
1152
1153		ret = copy_page_to_iter(bvec->bv_page,
1154					bvec->bv_offset,
1155					bvec->bv_len,
1156					&iter);
1157
1158		if (!iov_iter_count(&iter))
1159			break;
1160
1161		if (ret < bvec->bv_len)
1162			return -EFAULT;
1163	}
1164
1165	return 0;
1166}
1167
1168void bio_free_pages(struct bio *bio)
1169{
1170	struct bio_vec *bvec;
1171	struct bvec_iter_all iter_all;
1172
1173	bio_for_each_segment_all(bvec, bio, iter_all)
1174		__free_page(bvec->bv_page);
1175}
1176EXPORT_SYMBOL(bio_free_pages);
1177
1178/**
1179 *	bio_uncopy_user	-	finish previously mapped bio
1180 *	@bio: bio being terminated
1181 *
1182 *	Free pages allocated from bio_copy_user_iov() and write back data
1183 *	to user space in case of a read.
1184 */
1185int bio_uncopy_user(struct bio *bio)
1186{
1187	struct bio_map_data *bmd = bio->bi_private;
1188	int ret = 0;
1189
1190	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1191		/*
1192		 * if we're in a workqueue, the request is orphaned, so
1193		 * don't copy into a random user address space, just free
1194		 * and return -EINTR so user space doesn't expect any data.
1195		 */
1196		if (!current->mm)
1197			ret = -EINTR;
1198		else if (bio_data_dir(bio) == READ)
1199			ret = bio_copy_to_iter(bio, bmd->iter);
1200		if (bmd->is_our_pages)
1201			bio_free_pages(bio);
1202	}
1203	kfree(bmd);
1204	bio_put(bio);
1205	return ret;
1206}
1207
1208/**
1209 *	bio_copy_user_iov	-	copy user data to bio
1210 *	@q:		destination block queue
1211 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
1212 *	@iter:		iovec iterator
1213 *	@gfp_mask:	memory allocation flags
1214 *
1215 *	Prepares and returns a bio for indirect user io, bouncing data
 1216 *	to/from kernel pages as necessary. Must be paired with a
 1217 *	call to bio_uncopy_user() on io completion.
1218 */
1219struct bio *bio_copy_user_iov(struct request_queue *q,
1220			      struct rq_map_data *map_data,
1221			      struct iov_iter *iter,
1222			      gfp_t gfp_mask)
1223{
1224	struct bio_map_data *bmd;
1225	struct page *page;
1226	struct bio *bio;
1227	int i = 0, ret;
1228	int nr_pages;
1229	unsigned int len = iter->count;
1230	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1231
1232	bmd = bio_alloc_map_data(iter, gfp_mask);
1233	if (!bmd)
1234		return ERR_PTR(-ENOMEM);
1235
1236	/*
1237	 * We need to do a deep copy of the iov_iter including the iovecs.
 1238	 * The caller-provided iov might point to an on-stack or otherwise
 1239	 * short-lived one.
1240	 */
1241	bmd->is_our_pages = map_data ? 0 : 1;
1242
1243	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1244	if (nr_pages > BIO_MAX_PAGES)
1245		nr_pages = BIO_MAX_PAGES;
1246
1247	ret = -ENOMEM;
1248	bio = bio_kmalloc(gfp_mask, nr_pages);
1249	if (!bio)
1250		goto out_bmd;
1251
1252	ret = 0;
1253
1254	if (map_data) {
1255		nr_pages = 1 << map_data->page_order;
1256		i = map_data->offset / PAGE_SIZE;
1257	}
1258	while (len) {
1259		unsigned int bytes = PAGE_SIZE;
1260
1261		bytes -= offset;
1262
1263		if (bytes > len)
1264			bytes = len;
1265
1266		if (map_data) {
1267			if (i == map_data->nr_entries * nr_pages) {
1268				ret = -ENOMEM;
1269				break;
1270			}
1271
1272			page = map_data->pages[i / nr_pages];
1273			page += (i % nr_pages);
1274
1275			i++;
1276		} else {
1277			page = alloc_page(q->bounce_gfp | gfp_mask);
1278			if (!page) {
1279				ret = -ENOMEM;
1280				break;
1281			}
1282		}
1283
1284		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
1285			if (!map_data)
1286				__free_page(page);
1287			break;
1288		}
1289
1290		len -= bytes;
1291		offset = 0;
1292	}
1293
1294	if (ret)
1295		goto cleanup;
1296
1297	if (map_data)
1298		map_data->offset += bio->bi_iter.bi_size;
1299
1300	/*
1301	 * success
1302	 */
1303	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1304	    (map_data && map_data->from_user)) {
1305		ret = bio_copy_from_iter(bio, iter);
1306		if (ret)
1307			goto cleanup;
1308	} else {
1309		if (bmd->is_our_pages)
1310			zero_fill_bio(bio);
1311		iov_iter_advance(iter, bio->bi_iter.bi_size);
1312	}
1313
1314	bio->bi_private = bmd;
1315	if (map_data && map_data->null_mapped)
1316		bio_set_flag(bio, BIO_NULL_MAPPED);
1317	return bio;
1318cleanup:
1319	if (!map_data)
1320		bio_free_pages(bio);
1321	bio_put(bio);
1322out_bmd:
1323	kfree(bmd);
1324	return ERR_PTR(ret);
1325}
1326
1327/**
1328 *	bio_map_user_iov - map user iovec into bio
1329 *	@q:		the struct request_queue for the bio
1330 *	@iter:		iovec iterator
1331 *	@gfp_mask:	memory allocation flags
1332 *
1333 *	Map the user space address into a bio suitable for io to a block
1334 *	device. Returns an error pointer in case of error.
1335 */
1336struct bio *bio_map_user_iov(struct request_queue *q,
1337			     struct iov_iter *iter,
1338			     gfp_t gfp_mask)
1339{
1340	int j;
1341	struct bio *bio;
1342	int ret;
1343
1344	if (!iov_iter_count(iter))
1345		return ERR_PTR(-EINVAL);
1346
1347	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1348	if (!bio)
1349		return ERR_PTR(-ENOMEM);
1350
1351	while (iov_iter_count(iter)) {
1352		struct page **pages;
1353		ssize_t bytes;
1354		size_t offs, added = 0;
1355		int npages;
1356
1357		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1358		if (unlikely(bytes <= 0)) {
1359			ret = bytes ? bytes : -EFAULT;
1360			goto out_unmap;
1361		}
1362
1363		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1364
1365		if (unlikely(offs & queue_dma_alignment(q))) {
1366			ret = -EINVAL;
1367			j = 0;
1368		} else {
1369			for (j = 0; j < npages; j++) {
1370				struct page *page = pages[j];
1371				unsigned int n = PAGE_SIZE - offs;
1372				bool same_page = false;
1373
1374				if (n > bytes)
1375					n = bytes;
1376
1377				if (!__bio_add_pc_page(q, bio, page, n, offs,
1378						&same_page)) {
1379					if (same_page)
1380						put_page(page);
1381					break;
1382				}
1383
1384				added += n;
1385				bytes -= n;
1386				offs = 0;
1387			}
1388			iov_iter_advance(iter, added);
1389		}
1390		/*
1391		 * release the pages we didn't map into the bio, if any
1392		 */
1393		while (j < npages)
1394			put_page(pages[j++]);
1395		kvfree(pages);
1396		/* couldn't stuff something into bio? */
1397		if (bytes)
1398			break;
1399	}
1400
1401	bio_set_flag(bio, BIO_USER_MAPPED);
1402
1403	/*
1404	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
1405	 * it would normally disappear when its bi_end_io is run.
1406	 * however, we need it for the unmap, so grab an extra
1407	 * reference to it
1408	 */
1409	bio_get(bio);
1410	return bio;
1411
1412 out_unmap:
1413	bio_release_pages(bio, false);
1414	bio_put(bio);
1415	return ERR_PTR(ret);
1416}
1417
1418/**
1419 *	bio_unmap_user	-	unmap a bio
1420 *	@bio:		the bio being unmapped
1421 *
1422 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
1423 *	process context.
1424 *
1425 *	bio_unmap_user() may sleep.
1426 */
1427void bio_unmap_user(struct bio *bio)
1428{
1429	bio_release_pages(bio, bio_data_dir(bio) == READ);
1430	bio_put(bio);
1431	bio_put(bio);
1432}
1433
1434static void bio_invalidate_vmalloc_pages(struct bio *bio)
1435{
1436#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
1437	if (bio->bi_private && !op_is_write(bio_op(bio))) {
1438		unsigned long i, len = 0;
1439
1440		for (i = 0; i < bio->bi_vcnt; i++)
1441			len += bio->bi_io_vec[i].bv_len;
1442		invalidate_kernel_vmap_range(bio->bi_private, len);
1443	}
1444#endif
1445}
1446
1447static void bio_map_kern_endio(struct bio *bio)
1448{
1449	bio_invalidate_vmalloc_pages(bio);
1450	bio_put(bio);
1451}
1452
1453/**
1454 *	bio_map_kern	-	map kernel address into bio
1455 *	@q: the struct request_queue for the bio
1456 *	@data: pointer to buffer to map
1457 *	@len: length in bytes
1458 *	@gfp_mask: allocation flags for bio allocation
1459 *
1460 *	Map the kernel address into a bio suitable for io to a block
1461 *	device. Returns an error pointer in case of error.
1462 */
1463struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1464			 gfp_t gfp_mask)
1465{
1466	unsigned long kaddr = (unsigned long)data;
1467	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1468	unsigned long start = kaddr >> PAGE_SHIFT;
1469	const int nr_pages = end - start;
1470	bool is_vmalloc = is_vmalloc_addr(data);
1471	struct page *page;
1472	int offset, i;
1473	struct bio *bio;
1474
1475	bio = bio_kmalloc(gfp_mask, nr_pages);
1476	if (!bio)
1477		return ERR_PTR(-ENOMEM);
1478
1479	if (is_vmalloc) {
1480		flush_kernel_vmap_range(data, len);
1481		bio->bi_private = data;
1482	}
1483
1484	offset = offset_in_page(kaddr);
1485	for (i = 0; i < nr_pages; i++) {
1486		unsigned int bytes = PAGE_SIZE - offset;
1487
1488		if (len <= 0)
1489			break;
1490
1491		if (bytes > len)
1492			bytes = len;
1493
1494		if (!is_vmalloc)
1495			page = virt_to_page(data);
1496		else
1497			page = vmalloc_to_page(data);
1498		if (bio_add_pc_page(q, bio, page, bytes,
1499				    offset) < bytes) {
1500			/* we don't support partial mappings */
1501			bio_put(bio);
1502			return ERR_PTR(-EINVAL);
1503		}
1504
1505		data += bytes;
1506		len -= bytes;
1507		offset = 0;
1508	}
1509
1510	bio->bi_end_io = bio_map_kern_endio;
1511	return bio;
1512}
1513
1514static void bio_copy_kern_endio(struct bio *bio)
1515{
1516	bio_free_pages(bio);
1517	bio_put(bio);
1518}
1519
1520static void bio_copy_kern_endio_read(struct bio *bio)
1521{
1522	char *p = bio->bi_private;
1523	struct bio_vec *bvec;
1524	struct bvec_iter_all iter_all;
1525
1526	bio_for_each_segment_all(bvec, bio, iter_all) {
1527		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1528		p += bvec->bv_len;
1529	}
1530
1531	bio_copy_kern_endio(bio);
1532}
1533
1534/**
1535 *	bio_copy_kern	-	copy kernel address into bio
1536 *	@q: the struct request_queue for the bio
1537 *	@data: pointer to buffer to copy
1538 *	@len: length in bytes
1539 *	@gfp_mask: allocation flags for bio and page allocation
1540 *	@reading: data direction is READ
1541 *
1542 *	copy the kernel address into a bio suitable for io to a block
1543 *	device. Returns an error pointer in case of error.
1544 */
1545struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1546			  gfp_t gfp_mask, int reading)
1547{
1548	unsigned long kaddr = (unsigned long)data;
1549	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1550	unsigned long start = kaddr >> PAGE_SHIFT;
1551	struct bio *bio;
1552	void *p = data;
1553	int nr_pages = 0;
1554
1555	/*
1556	 * Overflow, abort
1557	 */
1558	if (end < start)
1559		return ERR_PTR(-EINVAL);
1560
1561	nr_pages = end - start;
1562	bio = bio_kmalloc(gfp_mask, nr_pages);
1563	if (!bio)
1564		return ERR_PTR(-ENOMEM);
1565
1566	while (len) {
1567		struct page *page;
1568		unsigned int bytes = PAGE_SIZE;
1569
1570		if (bytes > len)
1571			bytes = len;
1572
1573		page = alloc_page(q->bounce_gfp | gfp_mask);
1574		if (!page)
1575			goto cleanup;
1576
1577		if (!reading)
1578			memcpy(page_address(page), p, bytes);
1579
1580		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1581			break;
1582
1583		len -= bytes;
1584		p += bytes;
1585	}
1586
1587	if (reading) {
1588		bio->bi_end_io = bio_copy_kern_endio_read;
1589		bio->bi_private = data;
1590	} else {
1591		bio->bi_end_io = bio_copy_kern_endio;
1592	}
1593
1594	return bio;
1595
1596cleanup:
1597	bio_free_pages(bio);
1598	bio_put(bio);
1599	return ERR_PTR(-ENOMEM);
1600}
1601
1602/*
1603 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1604 * for performing direct-IO in BIOs.
1605 *
1606 * The problem is that we cannot run set_page_dirty() from interrupt context
1607 * because the required locks are not interrupt-safe.  So what we can do is to
1608 * mark the pages dirty _before_ performing IO.  And in interrupt context,
1609 * check that the pages are still dirty.   If so, fine.  If not, redirty them
1610 * in process context.
1611 *
1612 * We special-case compound pages here: normally this means reads into hugetlb
1613 * pages.  The logic in here doesn't really work right for compound pages
1614 * because the VM does not uniformly chase down the head page in all cases.
1615 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1616 * handle them at all.  So we skip compound pages here at an early stage.
1617 *
1618 * Note that this code is very hard to test under normal circumstances because
1619 * direct-io pins the pages with get_user_pages().  This makes
1620 * is_page_cache_freeable return false, and the VM will not clean the pages.
1621 * But other code (eg, flusher threads) could clean the pages if they are mapped
1622 * pagecache.
1623 *
1624 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1625 * deferred bio dirtying paths.
1626 */
1627
1628/*
1629 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1630 */
1631void bio_set_pages_dirty(struct bio *bio)
1632{
1633	struct bio_vec *bvec;
1634	struct bvec_iter_all iter_all;
1635
1636	bio_for_each_segment_all(bvec, bio, iter_all) {
1637		if (!PageCompound(bvec->bv_page))
1638			set_page_dirty_lock(bvec->bv_page);
1639	}
1640}
1641
1642/*
1643 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1644 * If they are, then fine.  If, however, some pages are clean then they must
1645 * have been written out during the direct-IO read.  So we take another ref on
1646 * the BIO and re-dirty the pages in process context.
1647 *
1648 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1649 * here on.  It will run one put_page() against each page and will run one
1650 * bio_put() against the BIO.
1651 */
1652
1653static void bio_dirty_fn(struct work_struct *work);
1654
1655static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1656static DEFINE_SPINLOCK(bio_dirty_lock);
1657static struct bio *bio_dirty_list;
1658
1659/*
1660 * This runs in process context
1661 */
1662static void bio_dirty_fn(struct work_struct *work)
1663{
1664	struct bio *bio, *next;
1665
1666	spin_lock_irq(&bio_dirty_lock);
1667	next = bio_dirty_list;
1668	bio_dirty_list = NULL;
1669	spin_unlock_irq(&bio_dirty_lock);
1670
1671	while ((bio = next) != NULL) {
1672		next = bio->bi_private;
1673
1674		bio_release_pages(bio, true);
1675		bio_put(bio);
1676	}
1677}
1678
1679void bio_check_pages_dirty(struct bio *bio)
1680{
1681	struct bio_vec *bvec;
1682	unsigned long flags;
1683	struct bvec_iter_all iter_all;
1684
1685	bio_for_each_segment_all(bvec, bio, iter_all) {
1686		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1687			goto defer;
1688	}
1689
1690	bio_release_pages(bio, false);
1691	bio_put(bio);
1692	return;
1693defer:
1694	spin_lock_irqsave(&bio_dirty_lock, flags);
1695	bio->bi_private = bio_dirty_list;
1696	bio_dirty_list = bio;
1697	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1698	schedule_work(&bio_dirty_work);
1699}
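
/*
 * Example (illustrative sketch, not part of the original file): the shape of a
 * direct-I/O read path that uses the two helpers above - pages are dirtied
 * before submission, then re-checked (and the bio owned) by
 * bio_check_pages_dirty() on completion. "dio_read_endio" is hypothetical.
 *
 *	submission side:
 *		bio_set_pages_dirty(bio);
 *		bio->bi_end_io = dio_read_endio;
 *		submit_bio(bio);
 *
 *	completion side:
 *		static void dio_read_endio(struct bio *bio)
 *		{
 *			bio_check_pages_dirty(bio);	releases pages, puts bio
 *		}
 */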
1700
1701void update_io_ticks(struct hd_struct *part, unsigned long now)
1702{
1703	unsigned long stamp;
1704again:
1705	stamp = READ_ONCE(part->stamp);
1706	if (unlikely(stamp != now)) {
1707		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
1708			__part_stat_add(part, io_ticks, 1);
1709		}
1710	}
1711	if (part->partno) {
1712		part = &part_to_disk(part)->part0;
1713		goto again;
1714	}
1715}
1716
1717void generic_start_io_acct(struct request_queue *q, int op,
1718			   unsigned long sectors, struct hd_struct *part)
1719{
1720	const int sgrp = op_stat_group(op);
1721
1722	part_stat_lock();
1723
1724	update_io_ticks(part, jiffies);
1725	part_stat_inc(part, ios[sgrp]);
1726	part_stat_add(part, sectors[sgrp], sectors);
1727	part_inc_in_flight(q, part, op_is_write(op));
1728
1729	part_stat_unlock();
1730}
1731EXPORT_SYMBOL(generic_start_io_acct);
1732
1733void generic_end_io_acct(struct request_queue *q, int req_op,
1734			 struct hd_struct *part, unsigned long start_time)
1735{
1736	unsigned long now = jiffies;
1737	unsigned long duration = now - start_time;
1738	const int sgrp = op_stat_group(req_op);
1739
1740	part_stat_lock();
1741
1742	update_io_ticks(part, now);
1743	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1744	part_stat_add(part, time_in_queue, duration);
1745	part_dec_in_flight(q, part, op_is_write(req_op));
1746
1747	part_stat_unlock();
1748}
1749EXPORT_SYMBOL(generic_end_io_acct);
1750
1751static inline bool bio_remaining_done(struct bio *bio)
1752{
1753	/*
1754	 * If we're not chaining, then ->__bi_remaining is always 1 and
1755	 * we always end io on the first invocation.
1756	 */
1757	if (!bio_flagged(bio, BIO_CHAIN))
1758		return true;
1759
1760	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1761
1762	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1763		bio_clear_flag(bio, BIO_CHAIN);
1764		return true;
1765	}
1766
1767	return false;
1768}
1769
1770/**
1771 * bio_endio - end I/O on a bio
1772 * @bio:	bio
1773 *
1774 * Description:
1775 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1776 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1777 *   bio unless they own it and thus know that it has an end_io function.
1778 *
1779 *   bio_endio() can be called several times on a bio that has been chained
1780 *   using bio_chain().  The ->bi_end_io() function will only be called the
1781 *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1782 *   generated if BIO_TRACE_COMPLETION is set.
1783 **/
1784void bio_endio(struct bio *bio)
1785{
1786again:
1787	if (!bio_remaining_done(bio))
1788		return;
1789	if (!bio_integrity_endio(bio))
1790		return;
1791
1792	if (bio->bi_disk)
1793		rq_qos_done_bio(bio->bi_disk->queue, bio);
1794
1795	/*
1796	 * Need to have a real endio function for chained bios, otherwise
1797	 * various corner cases will break (like stacking block devices that
1798	 * save/restore bi_end_io) - however, we want to avoid unbounded
1799	 * recursion and blowing the stack. Tail call optimization would
1800	 * handle this, but compiling with frame pointers also disables
1801	 * gcc's sibling call optimization.
1802	 */
1803	if (bio->bi_end_io == bio_chain_endio) {
1804		bio = __bio_chain_endio(bio);
1805		goto again;
1806	}
1807
1808	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1809		trace_block_bio_complete(bio->bi_disk->queue, bio,
1810					 blk_status_to_errno(bio->bi_status));
1811		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1812	}
1813
1814	blk_throtl_bio_endio(bio);
1815	/* release cgroup info */
1816	bio_uninit(bio);
1817	if (bio->bi_end_io)
1818		bio->bi_end_io(bio);
1819}
1820EXPORT_SYMBOL(bio_endio);
1821
1822/**
1823 * bio_split - split a bio
1824 * @bio:	bio to split
1825 * @sectors:	number of sectors to split from the front of @bio
1826 * @gfp:	gfp mask
1827 * @bs:		bio set to allocate from
1828 *
1829 * Allocates and returns a new bio which represents @sectors from the start of
1830 * @bio, and updates @bio to represent the remaining sectors.
1831 *
1832 * Unless this is a discard request the newly allocated bio will point
1833 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1834 * neither @bio nor @bs are freed before the split bio.
1835 */
1836struct bio *bio_split(struct bio *bio, int sectors,
1837		      gfp_t gfp, struct bio_set *bs)
1838{
1839	struct bio *split;
1840
1841	BUG_ON(sectors <= 0);
1842	BUG_ON(sectors >= bio_sectors(bio));
1843
1844	split = bio_clone_fast(bio, gfp, bs);
1845	if (!split)
1846		return NULL;
1847
1848	split->bi_iter.bi_size = sectors << 9;
1849
1850	if (bio_integrity(split))
1851		bio_integrity_trim(split);
1852
1853	bio_advance(bio, split->bi_iter.bi_size);
1854
1855	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1856		bio_set_flag(split, BIO_TRACE_COMPLETION);
1857
1858	return split;
1859}
1860EXPORT_SYMBOL(bio_split);
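
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * split-and-chain pattern used by drivers that cannot service @bio in one go.
 * "max_sectors" and "my_bio_set" are hypothetical.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &my_bio_set);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(bio);	resubmit the remainder
 *		bio = split;			handle the front part now
 *	}
 */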
1861
1862/**
1863 * bio_trim - trim a bio
1864 * @bio:	bio to trim
1865 * @offset:	number of sectors to trim from the front of @bio
1866 * @size:	size we want to trim @bio to, in sectors
1867 */
1868void bio_trim(struct bio *bio, int offset, int size)
1869{
1870	/* 'bio' is a cloned bio which we need to trim to match
1871	 * the given offset and size.
1872	 */
1873
1874	size <<= 9;
1875	if (offset == 0 && size == bio->bi_iter.bi_size)
1876		return;
1877
1878	bio_advance(bio, offset << 9);
1879	bio->bi_iter.bi_size = size;
1880
1881	if (bio_integrity(bio))
1882		bio_integrity_trim(bio);
1883
1884}
1885EXPORT_SYMBOL_GPL(bio_trim);
1886
1887/*
1888 * create memory pools for biovec's in a bio_set.
1889 * use the global biovec slabs created for general use.
1890 */
1891int biovec_init_pool(mempool_t *pool, int pool_entries)
1892{
1893	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1894
1895	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1896}
1897
1898/*
1899 * bioset_exit - exit a bioset initialized with bioset_init()
1900 *
1901 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1902 * kzalloc()).
1903 */
1904void bioset_exit(struct bio_set *bs)
1905{
1906	if (bs->rescue_workqueue)
1907		destroy_workqueue(bs->rescue_workqueue);
1908	bs->rescue_workqueue = NULL;
1909
1910	mempool_exit(&bs->bio_pool);
1911	mempool_exit(&bs->bvec_pool);
1912
1913	bioset_integrity_free(bs);
1914	if (bs->bio_slab)
1915		bio_put_slab(bs);
1916	bs->bio_slab = NULL;
1917}
1918EXPORT_SYMBOL(bioset_exit);
1919
1920/**
1921 * bioset_init - Initialize a bio_set
1922 * @bs:		pool to initialize
1923 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1924 * @front_pad:	Number of bytes to allocate in front of the returned bio
1925 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1926 *              and %BIOSET_NEED_RESCUER
1927 *
1928 * Description:
1929 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1930 *    to ask for a number of bytes to be allocated in front of the bio.
1931 *    Front pad allocation is useful for embedding the bio inside
1932 *    another structure, to avoid allocating extra data to go with the bio.
 1933 *    Note that the bio must always be embedded at the END of that structure,
1934 *    or things will break badly.
1935 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1936 *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1937 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
 1938 *    dispatch queued bios when the mempool runs out of space.
1939 *
1940 */
1941int bioset_init(struct bio_set *bs,
1942		unsigned int pool_size,
1943		unsigned int front_pad,
1944		int flags)
1945{
1946	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1947
1948	bs->front_pad = front_pad;
1949
1950	spin_lock_init(&bs->rescue_lock);
1951	bio_list_init(&bs->rescue_list);
1952	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1953
1954	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1955	if (!bs->bio_slab)
1956		return -ENOMEM;
1957
1958	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1959		goto bad;
1960
1961	if ((flags & BIOSET_NEED_BVECS) &&
1962	    biovec_init_pool(&bs->bvec_pool, pool_size))
1963		goto bad;
1964
1965	if (!(flags & BIOSET_NEED_RESCUER))
1966		return 0;
1967
1968	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1969	if (!bs->rescue_workqueue)
1970		goto bad;
1971
1972	return 0;
1973bad:
1974	bioset_exit(bs);
1975	return -ENOMEM;
1976}
1977EXPORT_SYMBOL(bioset_init);
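
/*
 * Illustrative sketch, not part of the original file: a hypothetical driver
 * using @front_pad to embed its per-I/O state in front of the bio, as the
 * description above requires (bio last, front_pad = offsetof() of that
 * member).  All names below are made up for the example.
 */
struct example_io {
	void		*private_data;	/* driver state that travels with the bio */
	struct bio	bio;		/* must be the last member */
};

static struct bio_set example_bio_set;

static int example_setup(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_alloc(unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &example_bio_set);

	return bio ? container_of(bio, struct example_io, bio) : NULL;
}

static void example_teardown(void)
{
	bioset_exit(&example_bio_set);
}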
1978
1979/*
 1980 * Initialize and set up a new bio_set, based on the settings from
1981 * another bio_set.
1982 */
1983int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1984{
1985	int flags;
1986
1987	flags = 0;
1988	if (src->bvec_pool.min_nr)
1989		flags |= BIOSET_NEED_BVECS;
1990	if (src->rescue_workqueue)
1991		flags |= BIOSET_NEED_RESCUER;
1992
1993	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1994}
1995EXPORT_SYMBOL(bioset_init_from_src);
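
/*
 * Illustrative sketch, not part of the original file: cloning the settings of
 * an existing bioset (the global fs_bio_set is used here purely as an
 * example) into a private one, e.g. when a reload wants a fresh pool with the
 * same shape.  The helper name is hypothetical.
 */
static int example_clone_bioset_settings(struct bio_set *dst)
{
	return bioset_init_from_src(dst, &fs_bio_set);
}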
1996
1997#ifdef CONFIG_BLK_CGROUP
1998
1999/**
2000 * bio_disassociate_blkg - puts back the blkg reference if associated
2001 * @bio: target bio
2002 *
2003 * Helper to disassociate the blkg from @bio if a blkg is associated.
2004 */
2005void bio_disassociate_blkg(struct bio *bio)
2006{
2007	if (bio->bi_blkg) {
2008		blkg_put(bio->bi_blkg);
2009		bio->bi_blkg = NULL;
2010	}
2011}
2012EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
2013
2014/**
 2015 * __bio_associate_blkg - associate a bio with a blkg
2016 * @bio: target bio
2017 * @blkg: the blkg to associate
2018 *
2019 * This tries to associate @bio with the specified @blkg.  Association failure
2020 * is handled by walking up the blkg tree.  Therefore, the blkg associated can
2021 * be anything between @blkg and the root_blkg.  This situation only happens
 2022 * when a cgroup is dying; the remaining bios then spill to the closest
 2023 * live blkg.
2024 *
2025 * A reference will be taken on the @blkg and will be released when @bio is
2026 * freed.
2027 */
2028static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2029{
2030	bio_disassociate_blkg(bio);
2031
2032	bio->bi_blkg = blkg_tryget_closest(blkg);
2033}
2034
2035/**
2036 * bio_associate_blkg_from_css - associate a bio with a specified css
2037 * @bio: target bio
2038 * @css: target css
2039 *
 2040 * Associate @bio with the blkg found from @css and the request_queue of
 2041 * @bio.  This falls back to the queue's root_blkg if
2042 * the association fails with the css.
2043 */
2044void bio_associate_blkg_from_css(struct bio *bio,
2045				 struct cgroup_subsys_state *css)
2046{
2047	struct request_queue *q = bio->bi_disk->queue;
2048	struct blkcg_gq *blkg;
2049
2050	rcu_read_lock();
2051
2052	if (!css || !css->parent)
2053		blkg = q->root_blkg;
2054	else
2055		blkg = blkg_lookup_create(css_to_blkcg(css), q);
2056
2057	__bio_associate_blkg(bio, blkg);
2058
2059	rcu_read_unlock();
2060}
2061EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
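
/*
 * Illustrative sketch, not part of the original file: charging a bio to an
 * explicitly chosen cgroup rather than the submitting task, as a
 * writeback-style path that remembers the css of the cgroup that dirtied the
 * data might do.  The helper name is hypothetical.
 */
static void example_charge_to_owner(struct bio *bio,
				    struct cgroup_subsys_state *owner_css)
{
	/* takes its own blkg reference; released when the bio is freed */
	bio_associate_blkg_from_css(bio, owner_css);
}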
2062
2063#ifdef CONFIG_MEMCG
2064/**
2065 * bio_associate_blkg_from_page - associate a bio with the page's blkg
2066 * @bio: target bio
2067 * @page: the page to lookup the blkcg from
2068 *
2069 * Associate @bio with the blkg from @page's owning memcg and the respective
 2070 * request_queue.  If cgroup_e_css() returns %NULL, fall back to the queue's
2071 * root_blkg.
2072 */
2073void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
2074{
2075	struct cgroup_subsys_state *css;
2076
2077	if (!page->mem_cgroup)
2078		return;
2079
2080	rcu_read_lock();
2081
2082	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
2083	bio_associate_blkg_from_css(bio, css);
2084
2085	rcu_read_unlock();
2086}
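
/*
 * Illustrative sketch, not part of the original file: a swap-out style path
 * charging the write to the cgroup that owns the page rather than to the
 * task doing the writeback.  The helper name is hypothetical; the bio is
 * assumed to already have its device, op and end_io set up, and error
 * handling is omitted.
 */
static void example_writeback_page(struct bio *bio, struct page *page)
{
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_associate_blkg_from_page(bio, page);
	submit_bio(bio);
}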
2087#endif /* CONFIG_MEMCG */
2088
2089/**
2090 * bio_associate_blkg - associate a bio with a blkg
2091 * @bio: target bio
2092 *
2093 * Associate @bio with the blkg found from the bio's css and request_queue.
 2094 * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
2095 * already associated, the css is reused and association redone as the
2096 * request_queue may have changed.
2097 */
2098void bio_associate_blkg(struct bio *bio)
2099{
2100	struct cgroup_subsys_state *css;
2101
2102	rcu_read_lock();
2103
2104	if (bio->bi_blkg)
2105		css = &bio_blkcg(bio)->css;
2106	else
2107		css = blkcg_css();
2108
2109	bio_associate_blkg_from_css(bio, css);
2110
2111	rcu_read_unlock();
2112}
2113EXPORT_SYMBOL_GPL(bio_associate_blkg);
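
/*
 * Illustrative sketch, not part of the original file: a submitter explicitly
 * charging a bio to the current task's blkcg before issue.  In this tree
 * bio_set_dev() normally takes care of this already; the hypothetical helper
 * below only makes the step visible.
 */
static void example_submit_charged(struct bio *bio)
{
	/* uses current's css, or redoes an existing association */
	bio_associate_blkg(bio);
	submit_bio(bio);
}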
2114
2115/**
2116 * bio_clone_blkg_association - clone blkg association from src to dst bio
2117 * @dst: destination bio
2118 * @src: source bio
2119 */
2120void bio_clone_blkg_association(struct bio *dst, struct bio *src)
2121{
2122	rcu_read_lock();
2123
2124	if (src->bi_blkg)
2125		__bio_associate_blkg(dst, src->bi_blkg);
2126
2127	rcu_read_unlock();
2128}
2129EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
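
/*
 * Illustrative sketch, not part of the original file: a driver building a
 * brand-new bio on behalf of an incoming one and carrying the original
 * submitter's cgroup charge over, so throttling still sees a single stream.
 * The helper name is hypothetical.
 */
static struct bio *example_new_bio_like(struct bio *orig, unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);

	if (!bio)
		return NULL;

	bio_clone_blkg_association(bio, orig);	/* charge to @orig's blkg */
	return bio;
}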
2130#endif /* CONFIG_BLK_CGROUP */
2131
2132static void __init biovec_init_slabs(void)
2133{
2134	int i;
2135
2136	for (i = 0; i < BVEC_POOL_NR; i++) {
2137		int size;
2138		struct biovec_slab *bvs = bvec_slabs + i;
2139
2140		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2141			bvs->slab = NULL;
2142			continue;
2143		}
2144
2145		size = bvs->nr_vecs * sizeof(struct bio_vec);
2146		bvs->slab = kmem_cache_create(bvs->name, size, 0,
 2147					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2148	}
2149}
2150
2151static int __init init_bio(void)
2152{
2153	bio_slab_max = 2;
2154	bio_slab_nr = 0;
2155	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
2156			    GFP_KERNEL);
2157
2158	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
2159
2160	if (!bio_slabs)
2161		panic("bio: can't allocate bios\n");
2162
2163	bio_integrity_init();
2164	biovec_init_slabs();
2165
2166	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2167		panic("bio: can't allocate bios\n");
2168
2169	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2170		panic("bio: can't create integrity pool\n");
2171
2172	return 0;
2173}
2174subsys_initcall(init_bio);