v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   4 */
   5#include <linux/mm.h>
   6#include <linux/swap.h>
   7#include <linux/bio.h>
   8#include <linux/blkdev.h>
   9#include <linux/uio.h>
  10#include <linux/iocontext.h>
  11#include <linux/slab.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/export.h>
  15#include <linux/mempool.h>
  16#include <linux/workqueue.h>
  17#include <linux/cgroup.h>
  18#include <linux/highmem.h>
  19#include <linux/sched/sysctl.h>
  20#include <linux/blk-crypto.h>
  21#include <linux/xarray.h>
  22
  23#include <trace/events/block.h>
  24#include "blk.h"
  25#include "blk-rq-qos.h"
  26#include "blk-cgroup.h"
  27
  28#define ALLOC_CACHE_THRESHOLD	16
  29#define ALLOC_CACHE_SLACK	64
  30#define ALLOC_CACHE_MAX		256
  31
  32struct bio_alloc_cache {
  33	struct bio		*free_list;
  34	struct bio		*free_list_irq;
  35	unsigned int		nr;
  36	unsigned int		nr_irq;
  37};
  38
  39static struct biovec_slab {
  40	int nr_vecs;
  41	char *name;
  42	struct kmem_cache *slab;
  43} bvec_slabs[] __read_mostly = {
  44	{ .nr_vecs = 16, .name = "biovec-16" },
  45	{ .nr_vecs = 64, .name = "biovec-64" },
  46	{ .nr_vecs = 128, .name = "biovec-128" },
  47	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
  48};
  49
  50static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
  51{
  52	switch (nr_vecs) {
  53	/* smaller bios use inline vecs */
  54	case 5 ... 16:
  55		return &bvec_slabs[0];
  56	case 17 ... 64:
  57		return &bvec_slabs[1];
  58	case 65 ... 128:
  59		return &bvec_slabs[2];
  60	case 129 ... BIO_MAX_VECS:
  61		return &bvec_slabs[3];
  62	default:
  63		BUG();
  64		return NULL;
  65	}
  66}
  67
  68/*
  69 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  70 * IO code that does not need private memory pools.
  71 */
  72struct bio_set fs_bio_set;
  73EXPORT_SYMBOL(fs_bio_set);
  74
  75/*
  76 * Our slab pool management
  77 */
  78struct bio_slab {
  79	struct kmem_cache *slab;
  80	unsigned int slab_ref;
  81	unsigned int slab_size;
  82	char name[8];
  83};
  84static DEFINE_MUTEX(bio_slab_lock);
  85static DEFINE_XARRAY(bio_slabs);
  86
  87static struct bio_slab *create_bio_slab(unsigned int size)
  88{
  89	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
  90
  91	if (!bslab)
  92		return NULL;
  93
  94	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
  95	bslab->slab = kmem_cache_create(bslab->name, size,
  96			ARCH_KMALLOC_MINALIGN,
  97			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
  98	if (!bslab->slab)
  99		goto fail_alloc_slab;
 100
 101	bslab->slab_ref = 1;
 102	bslab->slab_size = size;
 103
 104	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
 105		return bslab;
 106
 107	kmem_cache_destroy(bslab->slab);
 108
 109fail_alloc_slab:
 110	kfree(bslab);
 111	return NULL;
 112}
 113
 114static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
 115{
 116	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
 117}
 118
 119static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
 120{
 121	unsigned int size = bs_bio_slab_size(bs);
 122	struct bio_slab *bslab;
 123
 124	mutex_lock(&bio_slab_lock);
 125	bslab = xa_load(&bio_slabs, size);
 126	if (bslab)
 127		bslab->slab_ref++;
 128	else
 129		bslab = create_bio_slab(size);
 130	mutex_unlock(&bio_slab_lock);
 131
 132	if (bslab)
 133		return bslab->slab;
 134	return NULL;
 135}
 136
 137static void bio_put_slab(struct bio_set *bs)
 138{
 139	struct bio_slab *bslab = NULL;
 140	unsigned int slab_size = bs_bio_slab_size(bs);
 141
 142	mutex_lock(&bio_slab_lock);
 143
 144	bslab = xa_load(&bio_slabs, slab_size);
 145	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 146		goto out;
 147
 148	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
 149
 150	WARN_ON(!bslab->slab_ref);
 151
 152	if (--bslab->slab_ref)
 153		goto out;
 154
 155	xa_erase(&bio_slabs, slab_size);
 156
 157	kmem_cache_destroy(bslab->slab);
 158	kfree(bslab);
 159
 160out:
 161	mutex_unlock(&bio_slab_lock);
 162}
 163
 164void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
 165{
 166	BUG_ON(nr_vecs > BIO_MAX_VECS);
 167
 168	if (nr_vecs == BIO_MAX_VECS)
 169		mempool_free(bv, pool);
 170	else if (nr_vecs > BIO_INLINE_VECS)
 171		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
 172}
 173
 174/*
 175 * Make the first allocation restricted and don't dump info on allocation
 176 * failures, since we'll fall back to the mempool in case of failure.
 177 */
 178static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
 179{
 180	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
 181		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 182}
 183
 184struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
 185		gfp_t gfp_mask)
 186{
 187	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
 188
 189	if (WARN_ON_ONCE(!bvs))
 190		return NULL;
 191
 192	/*
 193	 * Upgrade the nr_vecs request to take full advantage of the allocation.
 194	 * We also rely on this in the bvec_free path.
 195	 */
 196	*nr_vecs = bvs->nr_vecs;
 197
 198	/*
 199	 * Try a slab allocation first for all smaller allocations.  If that
 200	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
 201	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
 202	 */
 203	if (*nr_vecs < BIO_MAX_VECS) {
 204		struct bio_vec *bvl;
 205
 206		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
 207		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 208			return bvl;
 209		*nr_vecs = BIO_MAX_VECS;
 210	}
 211
 212	return mempool_alloc(pool, gfp_mask);
 213}
 214
 215void bio_uninit(struct bio *bio)
 216{
 217#ifdef CONFIG_BLK_CGROUP
 218	if (bio->bi_blkg) {
 219		blkg_put(bio->bi_blkg);
 220		bio->bi_blkg = NULL;
 221	}
 222#endif
 223	if (bio_integrity(bio))
 224		bio_integrity_free(bio);
 225
 226	bio_crypt_free_ctx(bio);
 227}
 228EXPORT_SYMBOL(bio_uninit);
 229
 230static void bio_free(struct bio *bio)
 231{
 232	struct bio_set *bs = bio->bi_pool;
 233	void *p = bio;
 234
 235	WARN_ON_ONCE(!bs);
 236
 237	bio_uninit(bio);
 238	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
 239	mempool_free(p - bs->front_pad, &bs->bio_pool);
 240}
 241
 242/*
  243 * Users of this function provide their own bio allocation. They must
  244 * therefore remember to pair any call to bio_init() with bio_uninit()
  245 * when IO has completed, or when the bio is released.
 246 */
 247void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
 248	      unsigned short max_vecs, blk_opf_t opf)
 249{
 250	bio->bi_next = NULL;
 251	bio->bi_bdev = bdev;
 252	bio->bi_opf = opf;
 253	bio->bi_flags = 0;
 254	bio->bi_ioprio = 0;
 255	bio->bi_status = 0;
 256	bio->bi_iter.bi_sector = 0;
 257	bio->bi_iter.bi_size = 0;
 258	bio->bi_iter.bi_idx = 0;
 259	bio->bi_iter.bi_bvec_done = 0;
 260	bio->bi_end_io = NULL;
 261	bio->bi_private = NULL;
 262#ifdef CONFIG_BLK_CGROUP
 263	bio->bi_blkg = NULL;
 264	bio->bi_issue.value = 0;
 265	if (bdev)
 266		bio_associate_blkg(bio);
 267#ifdef CONFIG_BLK_CGROUP_IOCOST
 268	bio->bi_iocost_cost = 0;
 269#endif
 270#endif
 271#ifdef CONFIG_BLK_INLINE_ENCRYPTION
 272	bio->bi_crypt_context = NULL;
 273#endif
 274#ifdef CONFIG_BLK_DEV_INTEGRITY
 275	bio->bi_integrity = NULL;
 276#endif
 277	bio->bi_vcnt = 0;
 278
 279	atomic_set(&bio->__bi_remaining, 1);
 280	atomic_set(&bio->__bi_cnt, 1);
 281	bio->bi_cookie = BLK_QC_T_NONE;
 282
 283	bio->bi_max_vecs = max_vecs;
 284	bio->bi_io_vec = table;
 285	bio->bi_pool = NULL;
 286}
 287EXPORT_SYMBOL(bio_init);
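/*
 * Editor's usage sketch (not part of the kernel source): a caller that
 * embeds a bio plus a small vec table in its own structure, as described
 * in the comment above.  "struct my_req" and my_end_io() are hypothetical.
 *
 *	struct my_req {
 *		struct bio_vec	vecs[4];
 *		struct bio	bio;
 *	};
 *
 *	bio_init(&req->bio, bdev, req->vecs, 4, REQ_OP_READ);
 *	req->bio.bi_end_io = my_end_io;
 *	submit_bio(&req->bio);
 *	...
 *	bio_uninit(&req->bio);		(pair with bio_init(), as noted above)
 */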
 288
 289/**
 290 * bio_reset - reinitialize a bio
 291 * @bio:	bio to reset
 292 * @bdev:	block device to use the bio for
 293 * @opf:	operation and flags for bio
 294 *
 295 * Description:
 296 *   After calling bio_reset(), @bio will be in the same state as a freshly
  297 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 298 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 299 *   comment in struct bio.
 300 */
 301void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
 302{
 303	bio_uninit(bio);
 304	memset(bio, 0, BIO_RESET_BYTES);
 305	atomic_set(&bio->__bi_remaining, 1);
 306	bio->bi_bdev = bdev;
 307	if (bio->bi_bdev)
 308		bio_associate_blkg(bio);
 309	bio->bi_opf = opf;
 310}
 311EXPORT_SYMBOL(bio_reset);
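/*
 * Editor's usage sketch (not part of the kernel source): reusing one bio
 * for a second I/O instead of allocating a new one.  "sector" and "page"
 * are hypothetical caller state; error handling is omitted.
 *
 *	submit_bio_wait(bio);
 *	bio_reset(bio, bdev, REQ_OP_WRITE | REQ_SYNC);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(bio);
 */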
 312
 313static struct bio *__bio_chain_endio(struct bio *bio)
 314{
 315	struct bio *parent = bio->bi_private;
 316
 317	if (bio->bi_status && !parent->bi_status)
 318		parent->bi_status = bio->bi_status;
 319	bio_put(bio);
 320	return parent;
 321}
 322
 323static void bio_chain_endio(struct bio *bio)
 324{
 325	bio_endio(__bio_chain_endio(bio));
 326}
 327
 328/**
 329 * bio_chain - chain bio completions
 330 * @bio: the target bio
 331 * @parent: the parent bio of @bio
 332 *
 333 * The caller won't have a bi_end_io called when @bio completes - instead,
 334 * @parent's bi_end_io won't be called until both @parent and @bio have
 335 * completed; the chained bio will also be freed when it completes.
 336 *
 337 * The caller must not set bi_private or bi_end_io in @bio.
 338 */
 339void bio_chain(struct bio *bio, struct bio *parent)
 340{
 341	BUG_ON(bio->bi_private || bio->bi_end_io);
 342
 343	bio->bi_private = parent;
 344	bio->bi_end_io	= bio_chain_endio;
 345	bio_inc_remaining(parent);
 346}
 347EXPORT_SYMBOL(bio_chain);
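/*
 * Editor's usage sketch (not part of the kernel source): issuing a child
 * bio whose completion is folded into an existing "parent" bio the caller
 * already owns.  Note the child's bi_private/bi_end_io are left unset, as
 * required above.
 *
 *	struct bio *child = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_NOIO);
 *	bio_chain(child, parent);
 *	submit_bio(child);
 *	submit_bio(parent);
 *
 * parent->bi_end_io() now runs only once both bios have completed, and
 * the child is freed automatically when it completes.
 */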
 348
 349struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
 350		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
 351{
 352	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
 353
 354	if (bio) {
 355		bio_chain(bio, new);
 356		submit_bio(bio);
 357	}
 358
 359	return new;
 360}
 361EXPORT_SYMBOL_GPL(blk_next_bio);
 362
 363static void bio_alloc_rescue(struct work_struct *work)
 364{
 365	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 366	struct bio *bio;
 367
 368	while (1) {
 369		spin_lock(&bs->rescue_lock);
 370		bio = bio_list_pop(&bs->rescue_list);
 371		spin_unlock(&bs->rescue_lock);
 372
 373		if (!bio)
 374			break;
 375
 376		submit_bio_noacct(bio);
 377	}
 378}
 379
 380static void punt_bios_to_rescuer(struct bio_set *bs)
 381{
 382	struct bio_list punt, nopunt;
 383	struct bio *bio;
 384
 385	if (WARN_ON_ONCE(!bs->rescue_workqueue))
 386		return;
 387	/*
 388	 * In order to guarantee forward progress we must punt only bios that
 389	 * were allocated from this bio_set; otherwise, if there was a bio on
 390	 * there for a stacking driver higher up in the stack, processing it
 391	 * could require allocating bios from this bio_set, and doing that from
 392	 * our own rescuer would be bad.
 393	 *
 394	 * Since bio lists are singly linked, pop them all instead of trying to
 395	 * remove from the middle of the list:
 396	 */
 397
 398	bio_list_init(&punt);
 399	bio_list_init(&nopunt);
 400
 401	while ((bio = bio_list_pop(&current->bio_list[0])))
 402		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 403	current->bio_list[0] = nopunt;
 404
 405	bio_list_init(&nopunt);
 406	while ((bio = bio_list_pop(&current->bio_list[1])))
 407		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 408	current->bio_list[1] = nopunt;
 409
 410	spin_lock(&bs->rescue_lock);
 411	bio_list_merge(&bs->rescue_list, &punt);
 412	spin_unlock(&bs->rescue_lock);
 413
 414	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 415}
 416
 417static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
 418{
 419	unsigned long flags;
 420
 421	/* cache->free_list must be empty */
 422	if (WARN_ON_ONCE(cache->free_list))
 423		return;
 424
 425	local_irq_save(flags);
 426	cache->free_list = cache->free_list_irq;
 427	cache->free_list_irq = NULL;
 428	cache->nr += cache->nr_irq;
 429	cache->nr_irq = 0;
 430	local_irq_restore(flags);
 431}
 432
 433static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
 434		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
 435		struct bio_set *bs)
 436{
 437	struct bio_alloc_cache *cache;
 438	struct bio *bio;
 439
 440	cache = per_cpu_ptr(bs->cache, get_cpu());
 441	if (!cache->free_list) {
 442		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
 443			bio_alloc_irq_cache_splice(cache);
 444		if (!cache->free_list) {
 445			put_cpu();
 446			return NULL;
 447		}
 448	}
 449	bio = cache->free_list;
 450	cache->free_list = bio->bi_next;
 451	cache->nr--;
 452	put_cpu();
 453
 454	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
 455	bio->bi_pool = bs;
 456	return bio;
 457}
 458
 459/**
 460 * bio_alloc_bioset - allocate a bio for I/O
 461 * @bdev:	block device to allocate the bio for (can be %NULL)
 462 * @nr_vecs:	number of bvecs to pre-allocate
 463 * @opf:	operation and flags for bio
 464 * @gfp_mask:   the GFP_* mask given to the slab allocator
 465 * @bs:		the bio_set to allocate from.
 466 *
 467 * Allocate a bio from the mempools in @bs.
 468 *
 469 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 470 * allocate a bio.  This is due to the mempool guarantees.  To make this work,
 471 * callers must never allocate more than 1 bio at a time from the general pool.
 472 * Callers that need to allocate more than 1 bio must always submit the
 473 * previously allocated bio for IO before attempting to allocate a new one.
 474 * Failure to do so can cause deadlocks under memory pressure.
 475 *
 476 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 477 * bios are not submitted until after you return - see the code in
 478 * submit_bio_noacct() that converts recursion into iteration, to prevent
 479 * stack overflows.
 480 *
 481 * This would normally mean allocating multiple bios under submit_bio_noacct()
 482 * would be susceptible to deadlocks, but we have
 483 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 484 * thread.
 485 *
 486 * However, we do not guarantee forward progress for allocations from other
 487 * mempools. Doing multiple allocations from the same mempool under
 488 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 489 * for per bio allocations.
 490 *
 491 * Returns: Pointer to new bio on success, NULL on failure.
 492 */
 493struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 494			     blk_opf_t opf, gfp_t gfp_mask,
 495			     struct bio_set *bs)
 496{
 497	gfp_t saved_gfp = gfp_mask;
 498	struct bio *bio;
 499	void *p;
 500
 501	/* should not use nobvec bioset for nr_vecs > 0 */
 502	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 503		return NULL;
 504
 505	if (opf & REQ_ALLOC_CACHE) {
 506		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
 507			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
 508						     gfp_mask, bs);
 509			if (bio)
 510				return bio;
 511			/*
  512			 * No cached bio available; the bio returned below is marked
  513			 * with REQ_ALLOC_CACHE to participate in the per-cpu alloc cache.
 514			 */
 515		} else {
 516			opf &= ~REQ_ALLOC_CACHE;
 517		}
 518	}
 519
 520	/*
 521	 * submit_bio_noacct() converts recursion to iteration; this means if
 522	 * we're running beneath it, any bios we allocate and submit will not be
 523	 * submitted (and thus freed) until after we return.
 524	 *
 525	 * This exposes us to a potential deadlock if we allocate multiple bios
 526	 * from the same bio_set() while running underneath submit_bio_noacct().
 527	 * If we were to allocate multiple bios (say a stacking block driver
 528	 * that was splitting bios), we would deadlock if we exhausted the
 529	 * mempool's reserve.
 530	 *
 531	 * We solve this, and guarantee forward progress, with a rescuer
 532	 * workqueue per bio_set. If we go to allocate and there are bios on
 533	 * current->bio_list, we first try the allocation without
 534	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
 535	 * blocking to the rescuer workqueue before we retry with the original
 536	 * gfp_flags.
 537	 */
 538	if (current->bio_list &&
 539	    (!bio_list_empty(&current->bio_list[0]) ||
 540	     !bio_list_empty(&current->bio_list[1])) &&
 541	    bs->rescue_workqueue)
 542		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 543
 544	p = mempool_alloc(&bs->bio_pool, gfp_mask);
 545	if (!p && gfp_mask != saved_gfp) {
 546		punt_bios_to_rescuer(bs);
 547		gfp_mask = saved_gfp;
 548		p = mempool_alloc(&bs->bio_pool, gfp_mask);
 549	}
 550	if (unlikely(!p))
 551		return NULL;
 552	if (!mempool_is_saturated(&bs->bio_pool))
 553		opf &= ~REQ_ALLOC_CACHE;
 554
 555	bio = p + bs->front_pad;
 556	if (nr_vecs > BIO_INLINE_VECS) {
 557		struct bio_vec *bvl = NULL;
 558
 559		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
 560		if (!bvl && gfp_mask != saved_gfp) {
 561			punt_bios_to_rescuer(bs);
 562			gfp_mask = saved_gfp;
 563			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
 564		}
 565		if (unlikely(!bvl))
 566			goto err_free;
 567
 568		bio_init(bio, bdev, bvl, nr_vecs, opf);
 569	} else if (nr_vecs) {
 570		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
 571	} else {
 572		bio_init(bio, bdev, NULL, 0, opf);
 573	}
 574
 575	bio->bi_pool = bs;
 576	return bio;
 577
 578err_free:
 579	mempool_free(p, &bs->bio_pool);
 580	return NULL;
 581}
 582EXPORT_SYMBOL(bio_alloc_bioset);
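/*
 * Editor's usage sketch (not part of the kernel source): a single-page
 * read from a caller-owned bio_set "my_bio_set".  Because GFP_NOIO
 * includes __GFP_DIRECT_RECLAIM, the allocation itself cannot fail, per
 * the mempool guarantee described above; "sector", "page" and my_end_io()
 * are hypothetical.
 *
 *	struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO,
 *					   &my_bio_set);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */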
 583
 584/**
 585 * bio_kmalloc - kmalloc a bio
 586 * @nr_vecs:	number of bio_vecs to allocate
 587 * @gfp_mask:   the GFP_* mask given to the slab allocator
 588 *
 589 * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
 590 * using bio_init() before use.  To free a bio returned from this function use
 591 * kfree() after calling bio_uninit().  A bio returned from this function can
 592 * be reused by calling bio_uninit() before calling bio_init() again.
 593 *
  594 * Note that, unlike bio_alloc() or bio_alloc_bioset(), allocations from this
  595 * function are not backed by a mempool and can fail.  Do not use this function
 596 * for allocations in the file system I/O path.
 597 *
 598 * Returns: Pointer to new bio on success, NULL on failure.
 599 */
 600struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
 601{
 602	struct bio *bio;
 603
 604	if (nr_vecs > UIO_MAXIOV)
 605		return NULL;
 606	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
 607}
 608EXPORT_SYMBOL(bio_kmalloc);
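/*
 * Editor's lifecycle sketch (not part of the kernel source), following
 * the rules in the comment above: a bio_kmalloc() bio pairs with
 * bio_init()/bio_uninit() and is freed with kfree().
 *
 *	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
 *	... add pages, submit and wait ...
 *	bio_uninit(bio);
 *	kfree(bio);
 */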
 609
 610void zero_fill_bio(struct bio *bio)
 611{
 612	struct bio_vec bv;
 613	struct bvec_iter iter;
 614
 615	bio_for_each_segment(bv, bio, iter)
 616		memzero_bvec(&bv);
 617}
 618EXPORT_SYMBOL(zero_fill_bio);
 619
 620/**
  621 * bio_truncate - truncate the bio to the smaller size @new_size
 622 * @bio:	the bio to be truncated
 623 * @new_size:	new size for truncating the bio
 624 *
 625 * Description:
  626 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 627 *   REQ_OP_READ, zero the truncated part. This function should only
 628 *   be used for handling corner cases, such as bio eod.
 629 */
 630static void bio_truncate(struct bio *bio, unsigned new_size)
 631{
 632	struct bio_vec bv;
 633	struct bvec_iter iter;
 634	unsigned int done = 0;
 635	bool truncated = false;
 636
 637	if (new_size >= bio->bi_iter.bi_size)
 638		return;
 639
 640	if (bio_op(bio) != REQ_OP_READ)
 641		goto exit;
 642
 643	bio_for_each_segment(bv, bio, iter) {
 644		if (done + bv.bv_len > new_size) {
 645			unsigned offset;
 646
 647			if (!truncated)
 648				offset = new_size - done;
 649			else
 650				offset = 0;
 651			zero_user(bv.bv_page, bv.bv_offset + offset,
 652				  bv.bv_len - offset);
 653			truncated = true;
 654		}
 655		done += bv.bv_len;
 656	}
 657
 658 exit:
 659	/*
  660	 * Don't touch the bvec table here; keep it effectively immutable, since
  661	 * an fs bio user may retrieve all pages via bio_for_each_segment_all
  662	 * in its ->bi_end_io() callback.
 663	 *
 664	 * It is enough to truncate bio by updating .bi_size since we can make
 665	 * correct bvec with the updated .bi_size for drivers.
 666	 */
 667	bio->bi_iter.bi_size = new_size;
 668}
 669
 670/**
 671 * guard_bio_eod - truncate a BIO to fit the block device
 672 * @bio:	bio to truncate
 673 *
 674 * This allows us to do IO even on the odd last sectors of a device, even if the
 675 * block size is some multiple of the physical sector size.
 676 *
 677 * We'll just truncate the bio to the size of the device, and clear the end of
 678 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 679 * I/O errors, this only handles the "we need to be able to do I/O at the final
 680 * sector" case.
 681 */
 682void guard_bio_eod(struct bio *bio)
 683{
 684	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 685
 686	if (!maxsector)
 687		return;
 688
 689	/*
 690	 * If the *whole* IO is past the end of the device,
 691	 * let it through, and the IO layer will turn it into
 692	 * an EIO.
 693	 */
 694	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
 695		return;
 696
 697	maxsector -= bio->bi_iter.bi_sector;
 698	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
 699		return;
 700
 701	bio_truncate(bio, maxsector << 9);
 702}
 703
 704static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 705				   unsigned int nr)
 706{
 707	unsigned int i = 0;
 708	struct bio *bio;
 709
 710	while ((bio = cache->free_list) != NULL) {
 711		cache->free_list = bio->bi_next;
 712		cache->nr--;
 713		bio_free(bio);
 714		if (++i == nr)
 715			break;
 716	}
 717	return i;
 718}
 719
 720static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 721				  unsigned int nr)
 722{
 723	nr -= __bio_alloc_cache_prune(cache, nr);
 724	if (!READ_ONCE(cache->free_list)) {
 725		bio_alloc_irq_cache_splice(cache);
 726		__bio_alloc_cache_prune(cache, nr);
 727	}
 728}
 729
 730static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
 731{
 732	struct bio_set *bs;
 733
 734	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
 735	if (bs->cache) {
 736		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
 737
 738		bio_alloc_cache_prune(cache, -1U);
 739	}
 740	return 0;
 741}
 742
 743static void bio_alloc_cache_destroy(struct bio_set *bs)
 744{
 745	int cpu;
 746
 747	if (!bs->cache)
 748		return;
 749
 750	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
 751	for_each_possible_cpu(cpu) {
 752		struct bio_alloc_cache *cache;
 753
 754		cache = per_cpu_ptr(bs->cache, cpu);
 755		bio_alloc_cache_prune(cache, -1U);
 756	}
 757	free_percpu(bs->cache);
 758	bs->cache = NULL;
 759}
 760
 761static inline void bio_put_percpu_cache(struct bio *bio)
 762{
 763	struct bio_alloc_cache *cache;
 764
 765	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
 766	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) {
 767		put_cpu();
 768		bio_free(bio);
 769		return;
 770	}
 771
 772	bio_uninit(bio);
 773
 774	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
 775		bio->bi_next = cache->free_list;
 776		cache->free_list = bio;
 777		cache->nr++;
 778	} else {
 779		unsigned long flags;
 780
 781		local_irq_save(flags);
 782		bio->bi_next = cache->free_list_irq;
 783		cache->free_list_irq = bio;
 784		cache->nr_irq++;
 785		local_irq_restore(flags);
 786	}
 787	put_cpu();
 788}
 789
 790/**
 791 * bio_put - release a reference to a bio
 792 * @bio:   bio to release reference to
 793 *
 794 * Description:
 795 *   Put a reference to a &struct bio, either one you have gotten with
 796 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 797 **/
 798void bio_put(struct bio *bio)
 799{
 800	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
 801		BUG_ON(!atomic_read(&bio->__bi_cnt));
 802		if (!atomic_dec_and_test(&bio->__bi_cnt))
 803			return;
 804	}
 805	if (bio->bi_opf & REQ_ALLOC_CACHE)
 806		bio_put_percpu_cache(bio);
 807	else
 808		bio_free(bio);
 809}
 810EXPORT_SYMBOL(bio_put);
 811
 812static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 813{
 814	bio_set_flag(bio, BIO_CLONED);
 815	bio->bi_ioprio = bio_src->bi_ioprio;
 816	bio->bi_iter = bio_src->bi_iter;
 817
 818	if (bio->bi_bdev) {
 819		if (bio->bi_bdev == bio_src->bi_bdev &&
 820		    bio_flagged(bio_src, BIO_REMAPPED))
 821			bio_set_flag(bio, BIO_REMAPPED);
 822		bio_clone_blkg_association(bio, bio_src);
 823	}
 824
 825	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
 826		return -ENOMEM;
 827	if (bio_integrity(bio_src) &&
 828	    bio_integrity_clone(bio, bio_src, gfp) < 0)
 829		return -ENOMEM;
 830	return 0;
 831}
 832
 833/**
 834 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 835 * @bdev: block_device to clone onto
 836 * @bio_src: bio to clone from
 837 * @gfp: allocation priority
 838 * @bs: bio_set to allocate from
 839 *
 840 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
 841 * bio, but not the actual data it points to.
 842 *
  843 * The caller must ensure that the returned bio is not freed before @bio_src.
 844 */
 845struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
 846		gfp_t gfp, struct bio_set *bs)
 847{
 848	struct bio *bio;
 849
 850	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
 851	if (!bio)
 852		return NULL;
 853
 854	if (__bio_clone(bio, bio_src, gfp) < 0) {
 855		bio_put(bio);
 856		return NULL;
 857	}
 858	bio->bi_io_vec = bio_src->bi_io_vec;
 859
 860	return bio;
 861}
 862EXPORT_SYMBOL(bio_alloc_clone);
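/*
 * Editor's usage sketch (not part of the kernel source): a stacking
 * driver redirecting bio_src to a lower device.  "lower_bdev",
 * "my_bio_set" and my_clone_end_io() are hypothetical.
 *
 *	struct bio *clone = bio_alloc_clone(lower_bdev, bio_src, GFP_NOIO,
 *					    &my_bio_set);
 *	clone->bi_private = bio_src;
 *	clone->bi_end_io = my_clone_end_io;
 *	submit_bio_noacct(clone);
 *
 * Per the comment above, bio_src must stay alive until the clone is
 * freed, since the two share the biovec.
 */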
 863
 864/**
 865 * bio_init_clone - clone a bio that shares the original bio's biovec
 866 * @bdev: block_device to clone onto
 867 * @bio: bio to clone into
 868 * @bio_src: bio to clone from
 869 * @gfp: allocation priority
 870 *
 871 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 872 * The caller owns the returned bio, but not the actual data it points to.
 873 *
 874 * The caller must ensure that @bio_src is not freed before @bio.
 875 */
 876int bio_init_clone(struct block_device *bdev, struct bio *bio,
 877		struct bio *bio_src, gfp_t gfp)
 878{
 879	int ret;
 880
 881	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
 882	ret = __bio_clone(bio, bio_src, gfp);
 883	if (ret)
 884		bio_uninit(bio);
 885	return ret;
 886}
 887EXPORT_SYMBOL(bio_init_clone);
 888
 889/**
 890 * bio_full - check if the bio is full
 891 * @bio:	bio to check
 892 * @len:	length of one segment to be added
 893 *
 894 * Return true if @bio is full and one segment with @len bytes can't be
 895 * added to the bio, otherwise return false
 896 */
 897static inline bool bio_full(struct bio *bio, unsigned len)
 898{
 899	if (bio->bi_vcnt >= bio->bi_max_vecs)
 900		return true;
 901	if (bio->bi_iter.bi_size > UINT_MAX - len)
 902		return true;
 903	return false;
 904}
 905
 906static inline bool page_is_mergeable(const struct bio_vec *bv,
 907		struct page *page, unsigned int len, unsigned int off,
 908		bool *same_page)
 909{
 910	size_t bv_end = bv->bv_offset + bv->bv_len;
 911	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
 912	phys_addr_t page_addr = page_to_phys(page);
 913
 914	if (vec_end_addr + 1 != page_addr + off)
 915		return false;
 916	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
 917		return false;
 918	if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
 919		return false;
 920
 921	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
 922	if (*same_page)
 923		return true;
 924	else if (IS_ENABLED(CONFIG_KMSAN))
 925		return false;
 926	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
 927}
 928
 929/**
 930 * __bio_try_merge_page - try appending data to an existing bvec.
 931 * @bio: destination bio
 932 * @page: start page to add
 933 * @len: length of the data to add
 934 * @off: offset of the data relative to @page
 935 * @same_page: return if the segment has been merged inside the same page
 936 *
 937 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 938 * useful optimisation for file systems with a block size smaller than the
 939 * page size.
 940 *
  941 * Warn if (@len, @off) crosses pages when @same_page is true.
 942 *
 943 * Return %true on success or %false on failure.
 944 */
 945static bool __bio_try_merge_page(struct bio *bio, struct page *page,
 946		unsigned int len, unsigned int off, bool *same_page)
 947{
 948	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 949		return false;
 950
 951	if (bio->bi_vcnt > 0) {
 952		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 953
 954		if (page_is_mergeable(bv, page, len, off, same_page)) {
 955			if (bio->bi_iter.bi_size > UINT_MAX - len) {
 956				*same_page = false;
 957				return false;
 958			}
 959			bv->bv_len += len;
 960			bio->bi_iter.bi_size += len;
 961			return true;
 962		}
 963	}
 964	return false;
 965}
 966
 967/*
 968 * Try to merge a page into a segment, while obeying the hardware segment
 969 * size limit.  This is not for normal read/write bios, but for passthrough
 970 * or Zone Append operations that we can't split.
 971 */
 972static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
 973				 struct page *page, unsigned len,
 974				 unsigned offset, bool *same_page)
 975{
 976	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 977	unsigned long mask = queue_segment_boundary(q);
 978	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
 979	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 980
 981	if ((addr1 | mask) != (addr2 | mask))
 982		return false;
 983	if (bv->bv_len + len > queue_max_segment_size(q))
 984		return false;
 985	return __bio_try_merge_page(bio, page, len, offset, same_page);
 986}
 987
 988/**
 989 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 990 * @q: the target queue
 991 * @bio: destination bio
 992 * @page: page to add
 993 * @len: vec entry length
 994 * @offset: vec entry offset
 995 * @max_sectors: maximum number of sectors that can be added
 996 * @same_page: return if the segment has been merged inside the same page
 997 *
 998 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 999 * and gap limitations.
1000 */
1001int bio_add_hw_page(struct request_queue *q, struct bio *bio,
1002		struct page *page, unsigned int len, unsigned int offset,
1003		unsigned int max_sectors, bool *same_page)
1004{
1005	struct bio_vec *bvec;
1006
1007	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1008		return 0;
1009
1010	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
1011		return 0;
1012
1013	if (bio->bi_vcnt > 0) {
1014		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
1015			return len;
1016
1017		/*
1018		 * If the queue doesn't support SG gaps and adding this segment
1019		 * would create a gap, disallow it.
1020		 */
1021		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
1022		if (bvec_gap_to_prev(&q->limits, bvec, offset))
1023			return 0;
1024	}
1025
1026	if (bio_full(bio, len))
1027		return 0;
1028
1029	if (bio->bi_vcnt >= queue_max_segments(q))
1030		return 0;
1031
1032	bvec = &bio->bi_io_vec[bio->bi_vcnt];
1033	bvec->bv_page = page;
1034	bvec->bv_len = len;
1035	bvec->bv_offset = offset;
1036	bio->bi_vcnt++;
1037	bio->bi_iter.bi_size += len;
1038	return len;
1039}
1040
1041/**
1042 * bio_add_pc_page	- attempt to add page to passthrough bio
1043 * @q: the target queue
1044 * @bio: destination bio
1045 * @page: page to add
1046 * @len: vec entry length
1047 * @offset: vec entry offset
1048 *
1049 * Attempt to add a page to the bio_vec maplist. This can fail for a
1050 * number of reasons, such as the bio being full or target block device
 1051 * limitations. The target block device must allow bios up to PAGE_SIZE,
1052 * so it is always possible to add a single page to an empty bio.
1053 *
1054 * This should only be used by passthrough bios.
1055 */
1056int bio_add_pc_page(struct request_queue *q, struct bio *bio,
1057		struct page *page, unsigned int len, unsigned int offset)
1058{
1059	bool same_page = false;
1060	return bio_add_hw_page(q, bio, page, len, offset,
1061			queue_max_hw_sectors(q), &same_page);
1062}
1063EXPORT_SYMBOL(bio_add_pc_page);
1064
1065/**
1066 * bio_add_zone_append_page - attempt to add page to zone-append bio
1067 * @bio: destination bio
1068 * @page: page to add
1069 * @len: vec entry length
1070 * @offset: vec entry offset
1071 *
1072 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
1073 * for a zone-append request. This can fail for a number of reasons, such as the
 1074 * bio being full, the target block device not being a zoned block device, or
 1075 * other limitations of the target block device. The target block device must
 1076 * allow bios up to PAGE_SIZE, so it is always possible to add a single page
1077 * to an empty bio.
1078 *
1079 * Returns: number of bytes added to the bio, or 0 in case of a failure.
1080 */
1081int bio_add_zone_append_page(struct bio *bio, struct page *page,
1082			     unsigned int len, unsigned int offset)
1083{
1084	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1085	bool same_page = false;
1086
1087	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
1088		return 0;
1089
1090	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
1091		return 0;
1092
1093	return bio_add_hw_page(q, bio, page, len, offset,
1094			       queue_max_zone_append_sectors(q), &same_page);
1095}
1096EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
1097
1098/**
1099 * __bio_add_page - add page(s) to a bio in a new segment
1100 * @bio: destination bio
1101 * @page: start page to add
1102 * @len: length of the data to add, may cross pages
1103 * @off: offset of the data relative to @page, may cross pages
1104 *
1105 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
1106 * that @bio has space for another bvec.
1107 */
1108void __bio_add_page(struct bio *bio, struct page *page,
1109		unsigned int len, unsigned int off)
1110{
1111	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
1112
1113	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1114	WARN_ON_ONCE(bio_full(bio, len));
1115
1116	bv->bv_page = page;
1117	bv->bv_offset = off;
1118	bv->bv_len = len;
1119
1120	bio->bi_iter.bi_size += len;
1121	bio->bi_vcnt++;
1122}
1123EXPORT_SYMBOL_GPL(__bio_add_page);
1124
1125/**
1126 *	bio_add_page	-	attempt to add page(s) to bio
1127 *	@bio: destination bio
1128 *	@page: start page to add
1129 *	@len: vec entry length, may cross pages
1130 *	@offset: vec entry offset relative to @page, may cross pages
1131 *
1132 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
1133 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1134 */
1135int bio_add_page(struct bio *bio, struct page *page,
1136		 unsigned int len, unsigned int offset)
1137{
1138	bool same_page = false;
1139
1140	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1141		if (bio_full(bio, len))
1142			return 0;
1143		__bio_add_page(bio, page, len, offset);
1144	}
1145	return len;
1146}
1147EXPORT_SYMBOL(bio_add_page);
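/*
 * Editor's usage sketch (not part of the kernel source): filling a bio
 * page by page and stopping once it is full.  "pages" and "nr" are
 * hypothetical caller state.
 *
 *	for (i = 0; i < nr; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;
 *
 * A short return means the bio is full: submit it and start a new one.
 */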
1148
1149/**
1150 * bio_add_folio - Attempt to add part of a folio to a bio.
1151 * @bio: BIO to add to.
1152 * @folio: Folio to add.
1153 * @len: How many bytes from the folio to add.
1154 * @off: First byte in this folio to add.
1155 *
1156 * Filesystems that use folios can call this function instead of calling
1157 * bio_add_page() for each page in the folio.  If @off is bigger than
1158 * PAGE_SIZE, this function can create a bio_vec that starts in a page
1159 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
1160 *
1161 * Return: Whether the addition was successful.
1162 */
1163bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1164		   size_t off)
1165{
1166	if (len > UINT_MAX || off > UINT_MAX)
1167		return false;
1168	return bio_add_page(bio, &folio->page, len, off) > 0;
1169}
1170
1171void __bio_release_pages(struct bio *bio, bool mark_dirty)
1172{
1173	struct bvec_iter_all iter_all;
1174	struct bio_vec *bvec;
1175
1176	bio_for_each_segment_all(bvec, bio, iter_all) {
1177		if (mark_dirty && !PageCompound(bvec->bv_page))
1178			set_page_dirty_lock(bvec->bv_page);
1179		put_page(bvec->bv_page);
1180	}
1181}
1182EXPORT_SYMBOL_GPL(__bio_release_pages);
1183
1184void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
1185{
1186	size_t size = iov_iter_count(iter);
1187
1188	WARN_ON_ONCE(bio->bi_max_vecs);
1189
1190	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1191		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1192		size_t max_sectors = queue_max_zone_append_sectors(q);
1193
1194		size = min(size, max_sectors << SECTOR_SHIFT);
1195	}
1196
1197	bio->bi_vcnt = iter->nr_segs;
1198	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1199	bio->bi_iter.bi_bvec_done = iter->iov_offset;
1200	bio->bi_iter.bi_size = size;
1201	bio_set_flag(bio, BIO_NO_PAGE_REF);
1202	bio_set_flag(bio, BIO_CLONED);
1203}
1204
1205static int bio_iov_add_page(struct bio *bio, struct page *page,
1206		unsigned int len, unsigned int offset)
1207{
1208	bool same_page = false;
1209
1210	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1211		__bio_add_page(bio, page, len, offset);
1212		return 0;
1213	}
1214
1215	if (same_page)
1216		put_page(page);
1217	return 0;
1218}
1219
1220static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
1221		unsigned int len, unsigned int offset)
1222{
1223	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1224	bool same_page = false;
1225
1226	if (bio_add_hw_page(q, bio, page, len, offset,
1227			queue_max_zone_append_sectors(q), &same_page) != len)
1228		return -EINVAL;
1229	if (same_page)
1230		put_page(page);
1231	return 0;
1232}
1233
1234#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
1235
1236/**
1237 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1238 * @bio: bio to add pages to
1239 * @iter: iov iterator describing the region to be mapped
1240 *
1241 * Pins pages from *iter and appends them to @bio's bvec array. The
1242 * pages will have to be released using put_page() when done.
1243 * For multi-segment *iter, this function only adds pages from the
1244 * next non-empty segment of the iov iterator.
1245 */
1246static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1247{
1248	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1249	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1250	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1251	struct page **pages = (struct page **)bv;
1252	unsigned int gup_flags = 0;
1253	ssize_t size, left;
1254	unsigned len, i = 0;
1255	size_t offset, trim;
1256	int ret = 0;
1257
1258	/*
1259	 * Move page array up in the allocated memory for the bio vecs as far as
1260	 * possible so that we can start filling biovecs from the beginning
1261	 * without overwriting the temporary page array.
1262	 */
1263	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1264	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1265
1266	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1267		gup_flags |= FOLL_PCI_P2PDMA;
1268
1269	/*
1270	 * Each segment in the iov is required to be a block size multiple.
1271	 * However, we may not be able to get the entire segment if it spans
1272	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
1273	 * result to ensure the bio's total size is correct. The remainder of
1274	 * the iov data will be picked up in the next bio iteration.
1275	 */
1276	size = iov_iter_get_pages(iter, pages,
1277				  UINT_MAX - bio->bi_iter.bi_size,
1278				  nr_pages, &offset, gup_flags);
1279	if (unlikely(size <= 0))
1280		return size ? size : -EFAULT;
1281
1282	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
1283
1284	trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1285	iov_iter_revert(iter, trim);
1286
1287	size -= trim;
1288	if (unlikely(!size)) {
1289		ret = -EFAULT;
1290		goto out;
1291	}
1292
1293	for (left = size, i = 0; left > 0; left -= len, i++) {
1294		struct page *page = pages[i];
1295
1296		len = min_t(size_t, PAGE_SIZE - offset, left);
1297		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1298			ret = bio_iov_add_zone_append_page(bio, page, len,
1299					offset);
1300			if (ret)
1301				break;
1302		} else
1303			bio_iov_add_page(bio, page, len, offset);
1304
1305		offset = 0;
1306	}
1307
1308	iov_iter_revert(iter, left);
1309out:
1310	while (i < nr_pages)
1311		put_page(pages[i++]);
1312
1313	return ret;
1314}
1315
1316/**
1317 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1318 * @bio: bio to add pages to
1319 * @iter: iov iterator describing the region to be added
1320 *
1321 * This takes either an iterator pointing to user memory, or one pointing to
1322 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1323 * map them into the kernel. On IO completion, the caller should put those
1324 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1325 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1326 * to ensure the bvecs and pages stay referenced until the submitted I/O is
1327 * completed by a call to ->ki_complete() or returns with an error other than
1328 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1329 * on IO completion. If it isn't, then pages should be released.
1330 *
1331 * The function tries, but does not guarantee, to pin as many pages as
 1332 * fit into the bio, or are requested in @iter, whichever is smaller. If
 1333 * MM encounters an error pinning the requested pages, it stops. An error
 1334 * is returned only if no pages could be pinned.
1335 */
1336int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1337{
1338	int ret = 0;
1339
1340	if (iov_iter_is_bvec(iter)) {
1341		bio_iov_bvec_set(bio, iter);
1342		iov_iter_advance(iter, bio->bi_iter.bi_size);
1343		return 0;
1344	}
1345
1346	do {
1347		ret = __bio_iov_iter_get_pages(bio, iter);
1348	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1349
1350	return bio->bi_vcnt ? 0 : ret;
1351}
1352EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
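/*
 * Editor's usage sketch (not part of the kernel source): the typical
 * direct-I/O shape - fill a bio from the iter, submit, repeat until the
 * iter is drained.  Error handling and completion are omitted, and the
 * bio_iov_vecs_to_alloc() sizing helper is assumed from <linux/bio.h>.
 *
 *	while (iov_iter_count(iter)) {
 *		bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
 *				REQ_OP_READ, GFP_KERNEL);
 *		if (bio_iov_iter_get_pages(bio, iter))
 *			break;
 *		submit_bio(bio);
 *	}
 */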
1353
1354static void submit_bio_wait_endio(struct bio *bio)
1355{
1356	complete(bio->bi_private);
1357}
1358
1359/**
1360 * submit_bio_wait - submit a bio, and wait until it completes
1361 * @bio: The &struct bio which describes the I/O
1362 *
1363 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1364 * bio_endio() on failure.
1365 *
 1366 * WARNING: Unlike how submit_bio() is usually used, this function does not
 1367 * consume a reference to the bio. The caller must drop the reference
 1368 * on their own.
1369 */
1370int submit_bio_wait(struct bio *bio)
1371{
1372	DECLARE_COMPLETION_ONSTACK_MAP(done,
1373			bio->bi_bdev->bd_disk->lockdep_map);
1374	unsigned long hang_check;
1375
1376	bio->bi_private = &done;
1377	bio->bi_end_io = submit_bio_wait_endio;
1378	bio->bi_opf |= REQ_SYNC;
1379	submit_bio(bio);
1380
1381	/* Prevent hang_check timer from firing at us during very long I/O */
1382	hang_check = sysctl_hung_task_timeout_secs;
1383	if (hang_check)
1384		while (!wait_for_completion_io_timeout(&done,
1385					hang_check * (HZ/2)))
1386			;
1387	else
1388		wait_for_completion_io(&done);
1389
1390	return blk_status_to_errno(bio->bi_status);
1391}
1392EXPORT_SYMBOL(submit_bio_wait);
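/*
 * Editor's usage sketch (not part of the kernel source): synchronously
 * reading one page.  Note the explicit bio_put() - as warned above,
 * submit_bio_wait() does not consume the caller's reference.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */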
1393
1394void __bio_advance(struct bio *bio, unsigned bytes)
1395{
1396	if (bio_integrity(bio))
1397		bio_integrity_advance(bio, bytes);
1398
1399	bio_crypt_advance(bio, bytes);
1400	bio_advance_iter(bio, &bio->bi_iter, bytes);
1401}
1402EXPORT_SYMBOL(__bio_advance);
1403
1404void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1405			struct bio *src, struct bvec_iter *src_iter)
1406{
1407	while (src_iter->bi_size && dst_iter->bi_size) {
1408		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1409		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1410		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1411		void *src_buf = bvec_kmap_local(&src_bv);
1412		void *dst_buf = bvec_kmap_local(&dst_bv);
1413
1414		memcpy(dst_buf, src_buf, bytes);
1415
1416		kunmap_local(dst_buf);
1417		kunmap_local(src_buf);
1418
1419		bio_advance_iter_single(src, src_iter, bytes);
1420		bio_advance_iter_single(dst, dst_iter, bytes);
1421	}
1422}
1423EXPORT_SYMBOL(bio_copy_data_iter);
1424
1425/**
1426 * bio_copy_data - copy contents of data buffers from one bio to another
1427 * @src: source bio
1428 * @dst: destination bio
1429 *
1430 * Stops when it reaches the end of either @src or @dst - that is, copies
1431 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1432 */
1433void bio_copy_data(struct bio *dst, struct bio *src)
1434{
1435	struct bvec_iter src_iter = src->bi_iter;
1436	struct bvec_iter dst_iter = dst->bi_iter;
1437
1438	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1439}
1440EXPORT_SYMBOL(bio_copy_data);
1441
1442void bio_free_pages(struct bio *bio)
1443{
1444	struct bio_vec *bvec;
1445	struct bvec_iter_all iter_all;
1446
1447	bio_for_each_segment_all(bvec, bio, iter_all)
1448		__free_page(bvec->bv_page);
1449}
1450EXPORT_SYMBOL(bio_free_pages);
1451
1452/*
1453 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1454 * for performing direct-IO in BIOs.
1455 *
1456 * The problem is that we cannot run set_page_dirty() from interrupt context
1457 * because the required locks are not interrupt-safe.  So what we can do is to
1458 * mark the pages dirty _before_ performing IO.  And in interrupt context,
1459 * check that the pages are still dirty.   If so, fine.  If not, redirty them
1460 * in process context.
1461 *
1462 * We special-case compound pages here: normally this means reads into hugetlb
1463 * pages.  The logic in here doesn't really work right for compound pages
1464 * because the VM does not uniformly chase down the head page in all cases.
1465 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1466 * handle them at all.  So we skip compound pages here at an early stage.
1467 *
1468 * Note that this code is very hard to test under normal circumstances because
1469 * direct-io pins the pages with get_user_pages().  This makes
1470 * is_page_cache_freeable return false, and the VM will not clean the pages.
 1471 * But other code (e.g., flusher threads) could clean the pages if they are mapped
1472 * pagecache.
1473 *
1474 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1475 * deferred bio dirtying paths.
1476 */
1477
1478/*
1479 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1480 */
1481void bio_set_pages_dirty(struct bio *bio)
1482{
1483	struct bio_vec *bvec;
1484	struct bvec_iter_all iter_all;
1485
1486	bio_for_each_segment_all(bvec, bio, iter_all) {
1487		if (!PageCompound(bvec->bv_page))
1488			set_page_dirty_lock(bvec->bv_page);
1489	}
1490}
1491
1492/*
1493 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1494 * If they are, then fine.  If, however, some pages are clean then they must
1495 * have been written out during the direct-IO read.  So we take another ref on
1496 * the BIO and re-dirty the pages in process context.
1497 *
1498 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1499 * here on.  It will run one put_page() against each page and will run one
1500 * bio_put() against the BIO.
1501 */
1502
1503static void bio_dirty_fn(struct work_struct *work);
1504
1505static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1506static DEFINE_SPINLOCK(bio_dirty_lock);
1507static struct bio *bio_dirty_list;
1508
1509/*
1510 * This runs in process context
1511 */
1512static void bio_dirty_fn(struct work_struct *work)
1513{
1514	struct bio *bio, *next;
1515
1516	spin_lock_irq(&bio_dirty_lock);
1517	next = bio_dirty_list;
1518	bio_dirty_list = NULL;
1519	spin_unlock_irq(&bio_dirty_lock);
1520
1521	while ((bio = next) != NULL) {
1522		next = bio->bi_private;
1523
1524		bio_release_pages(bio, true);
1525		bio_put(bio);
1526	}
1527}
1528
1529void bio_check_pages_dirty(struct bio *bio)
1530{
1531	struct bio_vec *bvec;
1532	unsigned long flags;
1533	struct bvec_iter_all iter_all;
1534
1535	bio_for_each_segment_all(bvec, bio, iter_all) {
1536		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1537			goto defer;
1538	}
1539
1540	bio_release_pages(bio, false);
1541	bio_put(bio);
1542	return;
1543defer:
1544	spin_lock_irqsave(&bio_dirty_lock, flags);
1545	bio->bi_private = bio_dirty_list;
1546	bio_dirty_list = bio;
1547	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1548	schedule_work(&bio_dirty_work);
1549}
1550
1551static inline bool bio_remaining_done(struct bio *bio)
1552{
1553	/*
1554	 * If we're not chaining, then ->__bi_remaining is always 1 and
1555	 * we always end io on the first invocation.
1556	 */
1557	if (!bio_flagged(bio, BIO_CHAIN))
1558		return true;
1559
1560	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1561
1562	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1563		bio_clear_flag(bio, BIO_CHAIN);
1564		return true;
1565	}
1566
1567	return false;
1568}
1569
1570/**
1571 * bio_endio - end I/O on a bio
1572 * @bio:	bio
1573 *
1574 * Description:
1575 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1576 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1577 *   bio unless they own it and thus know that it has an end_io function.
1578 *
1579 *   bio_endio() can be called several times on a bio that has been chained
1580 *   using bio_chain().  The ->bi_end_io() function will only be called the
1581 *   last time.
1582 **/
1583void bio_endio(struct bio *bio)
1584{
1585again:
1586	if (!bio_remaining_done(bio))
1587		return;
1588	if (!bio_integrity_endio(bio))
1589		return;
1590
1591	rq_qos_done_bio(bio);
1592
1593	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1594		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1595		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1596	}
1597
1598	/*
1599	 * Need to have a real endio function for chained bios, otherwise
1600	 * various corner cases will break (like stacking block devices that
1601	 * save/restore bi_end_io) - however, we want to avoid unbounded
1602	 * recursion and blowing the stack. Tail call optimization would
1603	 * handle this, but compiling with frame pointers also disables
1604	 * gcc's sibling call optimization.
1605	 */
1606	if (bio->bi_end_io == bio_chain_endio) {
1607		bio = __bio_chain_endio(bio);
1608		goto again;
1609	}
1610
1611	blk_throtl_bio_endio(bio);
1612	/* release cgroup info */
1613	bio_uninit(bio);
1614	if (bio->bi_end_io)
1615		bio->bi_end_io(bio);
1616}
1617EXPORT_SYMBOL(bio_endio);
1618
1619/**
1620 * bio_split - split a bio
1621 * @bio:	bio to split
1622 * @sectors:	number of sectors to split from the front of @bio
1623 * @gfp:	gfp mask
1624 * @bs:		bio set to allocate from
1625 *
1626 * Allocates and returns a new bio which represents @sectors from the start of
1627 * @bio, and updates @bio to represent the remaining sectors.
1628 *
1629 * Unless this is a discard request the newly allocated bio will point
1630 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1631 * neither @bio nor @bs are freed before the split bio.
1632 */
1633struct bio *bio_split(struct bio *bio, int sectors,
1634		      gfp_t gfp, struct bio_set *bs)
1635{
1636	struct bio *split;
1637
1638	BUG_ON(sectors <= 0);
1639	BUG_ON(sectors >= bio_sectors(bio));
1640
1641	/* Zone append commands cannot be split */
1642	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1643		return NULL;
1644
1645	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1646	if (!split)
1647		return NULL;
1648
1649	split->bi_iter.bi_size = sectors << 9;
1650
1651	if (bio_integrity(split))
1652		bio_integrity_trim(split);
1653
1654	bio_advance(bio, split->bi_iter.bi_size);
1655
1656	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1657		bio_set_flag(split, BIO_TRACE_COMPLETION);
1658
1659	return split;
1660}
1661EXPORT_SYMBOL(bio_split);
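/*
 * Editor's usage sketch (not part of the kernel source): the common
 * stacking-driver pattern of splitting at a limit, chaining the front
 * part to the remainder and submitting both.  "max_sectors" and
 * "my_bio_set" are hypothetical.
 *
 *	struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
 *	bio_chain(split, bio);
 *	submit_bio_noacct(split);
 *	submit_bio_noacct(bio);
 *
 * The original bio now carries only the trimmed remainder and completes
 * after both halves are done.
 */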
1662
1663/**
1664 * bio_trim - trim a bio
1665 * @bio:	bio to trim
1666 * @offset:	number of sectors to trim from the front of @bio
1667 * @size:	size we want to trim @bio to, in sectors
1668 *
1669 * This function is typically used for bios that are cloned and submitted
1670 * to the underlying device in parts.
1671 */
1672void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1673{
1674	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1675			 offset + size > bio_sectors(bio)))
1676		return;
1677
1678	size <<= 9;
1679	if (offset == 0 && size == bio->bi_iter.bi_size)
1680		return;
1681
1682	bio_advance(bio, offset << 9);
1683	bio->bi_iter.bi_size = size;
1684
1685	if (bio_integrity(bio))
1686		bio_integrity_trim(bio);
1687}
1688EXPORT_SYMBOL_GPL(bio_trim);
1689
1690/*
1691 * create memory pools for biovec's in a bio_set.
1692 * use the global biovec slabs created for general use.
1693 */
1694int biovec_init_pool(mempool_t *pool, int pool_entries)
1695{
1696	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1697
1698	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1699}
1700
1701/*
1702 * bioset_exit - exit a bioset initialized with bioset_init()
1703 *
1704 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1705 * kzalloc()).
1706 */
1707void bioset_exit(struct bio_set *bs)
1708{
1709	bio_alloc_cache_destroy(bs);
1710	if (bs->rescue_workqueue)
1711		destroy_workqueue(bs->rescue_workqueue);
1712	bs->rescue_workqueue = NULL;
1713
1714	mempool_exit(&bs->bio_pool);
1715	mempool_exit(&bs->bvec_pool);
1716
1717	bioset_integrity_free(bs);
1718	if (bs->bio_slab)
1719		bio_put_slab(bs);
1720	bs->bio_slab = NULL;
1721}
1722EXPORT_SYMBOL(bioset_exit);
1723
1724/**
1725 * bioset_init - Initialize a bio_set
1726 * @bs:		pool to initialize
1727 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1728 * @front_pad:	Number of bytes to allocate in front of the returned bio
1729 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1730 *              and %BIOSET_NEED_RESCUER
1731 *
1732 * Description:
1733 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1734 *    to ask for a number of bytes to be allocated in front of the bio.
1735 *    Front pad allocation is useful for embedding the bio inside
1736 *    another structure, to avoid allocating extra data to go with the bio.
 1737 *    Note that the bio must always be embedded at the END of that structure,
1738 *    or things will break badly.
1739 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1740 *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
1741 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1742 *    to dispatch queued requests when the mempool runs out of space.
1743 *
1744 */
1745int bioset_init(struct bio_set *bs,
1746		unsigned int pool_size,
1747		unsigned int front_pad,
1748		int flags)
1749{
1750	bs->front_pad = front_pad;
1751	if (flags & BIOSET_NEED_BVECS)
1752		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1753	else
1754		bs->back_pad = 0;
1755
1756	spin_lock_init(&bs->rescue_lock);
1757	bio_list_init(&bs->rescue_list);
1758	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1759
1760	bs->bio_slab = bio_find_or_create_slab(bs);
1761	if (!bs->bio_slab)
1762		return -ENOMEM;
 
 
1763
1764	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
 
1765		goto bad;
1766
1767	if ((flags & BIOSET_NEED_BVECS) &&
1768	    biovec_init_pool(&bs->bvec_pool, pool_size))
1769		goto bad;
1770
1771	if (flags & BIOSET_NEED_RESCUER) {
1772		bs->rescue_workqueue = alloc_workqueue("bioset",
1773							WQ_MEM_RECLAIM, 0);
1774		if (!bs->rescue_workqueue)
1775			goto bad;
1776	}
1777	if (flags & BIOSET_PERCPU_CACHE) {
1778		bs->cache = alloc_percpu(struct bio_alloc_cache);
1779		if (!bs->cache)
1780			goto bad;
1781		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1782	}
 
1783
1784	return 0;
1785bad:
1786	bioset_exit(bs);
1787	return -ENOMEM;
1788}
1789EXPORT_SYMBOL(bioset_init);
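
/*
 * Illustrative sketch, not part of bio.c: using @front_pad to co-allocate
 * per-I/O driver state with the bio, as the description above suggests.
 * "struct example_io" and "example_bs" are hypothetical names.
 */
struct example_io {
	void		*private_data;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set example_bs;

static int __init example_init(void)
{
	/* reserve room for everything that sits in front of the bio */
	return bioset_init(&example_bs, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_io_from_bio(struct bio *bio)
{
	return container_of(bio, struct example_io, bio);
}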
 
1790
1791static int __init init_bio(void)
1792{
1793	int i;
1794
1795	bio_integrity_init();
1796
1797	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1798		struct biovec_slab *bvs = bvec_slabs + i;
1799
1800		bvs->slab = kmem_cache_create(bvs->name,
1801				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1802				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1803	}
 
1804
1805	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1806					bio_cpu_dead);
1807
1808	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1809			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1810		panic("bio: can't allocate bios\n");
1811
1812	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1813		panic("bio: can't create integrity pool\n");
1814
1815	return 0;
1816}
1817subsys_initcall(init_bio);
v4.17
 
   1/*
   2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 *
   13 * You should have received a copy of the GNU General Public License
   14 * along with this program; if not, write to the Free Software
   15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  16 *
  17 */
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/bio.h>
  21#include <linux/blkdev.h>
  22#include <linux/uio.h>
  23#include <linux/iocontext.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/kernel.h>
  27#include <linux/export.h>
  28#include <linux/mempool.h>
  29#include <linux/workqueue.h>
  30#include <linux/cgroup.h>
  31
  32#include <trace/events/block.h>
  33#include "blk.h"
 
 
  34
  35/*
  36 * Test patch to inline a certain number of bi_io_vec's inside the bio
  37 * itself, to shrink a bio data allocation from two mempool calls to one
  38 */
  39#define BIO_INLINE_VECS		4
  40
  41/*
  42 * if you change this list, also change bvec_alloc or things will
  43 * break badly! cannot be bigger than what you can fit into an
  44 * unsigned short
  45 */
  46#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  47static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  48	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
 
  49};
  50#undef BV
  51
  52/*
  53 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  54 * IO code that does not need private memory pools.
  55 */
  56struct bio_set *fs_bio_set;
  57EXPORT_SYMBOL(fs_bio_set);
  58
  59/*
  60 * Our slab pool management
  61 */
  62struct bio_slab {
  63	struct kmem_cache *slab;
  64	unsigned int slab_ref;
  65	unsigned int slab_size;
  66	char name[8];
  67};
  68static DEFINE_MUTEX(bio_slab_lock);
  69static struct bio_slab *bio_slabs;
  70static unsigned int bio_slab_nr, bio_slab_max;
  71
  72static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  73{
  74	unsigned int sz = sizeof(struct bio) + extra_size;
  75	struct kmem_cache *slab = NULL;
  76	struct bio_slab *bslab, *new_bio_slabs;
  77	unsigned int new_bio_slab_max;
  78	unsigned int i, entry = -1;
  79
  80	mutex_lock(&bio_slab_lock);
  81
  82	i = 0;
  83	while (i < bio_slab_nr) {
  84		bslab = &bio_slabs[i];
  85
  86		if (!bslab->slab && entry == -1)
  87			entry = i;
  88		else if (bslab->slab_size == sz) {
  89			slab = bslab->slab;
  90			bslab->slab_ref++;
  91			break;
  92		}
  93		i++;
  94	}
  95
  96	if (slab)
  97		goto out_unlock;
 
 
  98
  99	if (bio_slab_nr == bio_slab_max && entry == -1) {
 100		new_bio_slab_max = bio_slab_max << 1;
 101		new_bio_slabs = krealloc(bio_slabs,
 102					 new_bio_slab_max * sizeof(struct bio_slab),
 103					 GFP_KERNEL);
 104		if (!new_bio_slabs)
 105			goto out_unlock;
 106		bio_slab_max = new_bio_slab_max;
 107		bio_slabs = new_bio_slabs;
 108	}
 109	if (entry == -1)
 110		entry = bio_slab_nr++;
 111
 112	bslab = &bio_slabs[entry];
 113
 114	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
 115	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
 116				 SLAB_HWCACHE_ALIGN, NULL);
 117	if (!slab)
 118		goto out_unlock;
 119
 120	bslab->slab = slab;
 121	bslab->slab_ref = 1;
 122	bslab->slab_size = sz;
 123out_unlock:
 
 
 124	mutex_unlock(&bio_slab_lock);
 125	return slab;
 
 
 
 126}
 127
 128static void bio_put_slab(struct bio_set *bs)
 129{
 130	struct bio_slab *bslab = NULL;
 131	unsigned int i;
 132
 133	mutex_lock(&bio_slab_lock);
 134
 135	for (i = 0; i < bio_slab_nr; i++) {
 136		if (bs->bio_slab == bio_slabs[i].slab) {
 137			bslab = &bio_slabs[i];
 138			break;
 139		}
 140	}
 141
 142	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 143		goto out;
 144
 
 
 145	WARN_ON(!bslab->slab_ref);
 146
 147	if (--bslab->slab_ref)
 148		goto out;
 149
 
 
 150	kmem_cache_destroy(bslab->slab);
 151	bslab->slab = NULL;
 152
 153out:
 154	mutex_unlock(&bio_slab_lock);
 155}
 156
 157unsigned int bvec_nr_vecs(unsigned short idx)
 158{
 159	return bvec_slabs[idx].nr_vecs;
 160}
 161
 162void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 163{
 164	if (!idx)
 165		return;
 166	idx--;
 167
 168	BIO_BUG_ON(idx >= BVEC_POOL_NR);
 169
 170	if (idx == BVEC_POOL_MAX) {
 171		mempool_free(bv, pool);
 172	} else {
 173		struct biovec_slab *bvs = bvec_slabs + idx;
 174
 175		kmem_cache_free(bvs->slab, bv);
 176	}
 177}
 178
 179struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 180			   mempool_t *pool)
 181{
 182	struct bio_vec *bvl;
 
 
 
 183
 184	/*
 185	 * see comment near bvec_array define!
 
 186	 */
 187	switch (nr) {
 188	case 1:
 189		*idx = 0;
 190		break;
 191	case 2 ... 4:
 192		*idx = 1;
 193		break;
 194	case 5 ... 16:
 195		*idx = 2;
 196		break;
 197	case 17 ... 64:
 198		*idx = 3;
 199		break;
 200	case 65 ... 128:
 201		*idx = 4;
 202		break;
 203	case 129 ... BIO_MAX_PAGES:
 204		*idx = 5;
 205		break;
 206	default:
 207		return NULL;
 208	}
 209
 210	/*
 211	 * idx now points to the pool we want to allocate from. only the
 212	 * 1-vec entry pool is mempool backed.
 
 213	 */
 214	if (*idx == BVEC_POOL_MAX) {
 215fallback:
 216		bvl = mempool_alloc(pool, gfp_mask);
 217	} else {
 218		struct biovec_slab *bvs = bvec_slabs + *idx;
 219		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
 220
 221		/*
 222		 * Make this allocation restricted and don't dump info on
 223		 * allocation failures, since we'll fallback to the mempool
 224		 * in case of failure.
 225		 */
 226		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 227
 228		/*
 229		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
 230		 * is set, retry with the 1-entry mempool
 231		 */
 232		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
 233		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
 234			*idx = BVEC_POOL_MAX;
 235			goto fallback;
 236		}
 237	}
 238
 239	(*idx)++;
 240	return bvl;
 241}
 242
 243void bio_uninit(struct bio *bio)
 244{
 245	bio_disassociate_task(bio);
 246}
 247EXPORT_SYMBOL(bio_uninit);
 248
 249static void bio_free(struct bio *bio)
 250{
 251	struct bio_set *bs = bio->bi_pool;
 252	void *p;
 
 
 253
 254	bio_uninit(bio);
 255
 256	if (bs) {
 257		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
 258
 259		/*
 260		 * If we have front padding, adjust the bio pointer before freeing
 261		 */
 262		p = bio;
 263		p -= bs->front_pad;
 264
 265		mempool_free(p, bs->bio_pool);
 266	} else {
 267		/* Bio was allocated by bio_kmalloc() */
 268		kfree(bio);
 269	}
 270}
 271
 272/*
 273 * Users of this function have their own bio allocation. Subsequently,
 274 * they must remember to pair any call to bio_init() with bio_uninit()
 275 * when IO has completed, or when the bio is released.
 276 */
 277void bio_init(struct bio *bio, struct bio_vec *table,
 278	      unsigned short max_vecs)
 279{
 280	memset(bio, 0, sizeof(*bio));
 281	atomic_set(&bio->__bi_remaining, 1);
 282	atomic_set(&bio->__bi_cnt, 1);
 
 283
 
 284	bio->bi_io_vec = table;
 285	bio->bi_max_vecs = max_vecs;
 286}
 287EXPORT_SYMBOL(bio_init);
 288
 289/**
 290 * bio_reset - reinitialize a bio
 291 * @bio:	bio to reset
 292 *
 293 * Description:
 294 *   After calling bio_reset(), @bio will be in the same state as a freshly
  295 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 296 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 297 *   comment in struct bio.
 298 */
 299void bio_reset(struct bio *bio)
 300{
 301	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 302
 303	bio_uninit(bio);
 304
 305	memset(bio, 0, BIO_RESET_BYTES);
 306	bio->bi_flags = flags;
 307	atomic_set(&bio->__bi_remaining, 1);
 308}
 309EXPORT_SYMBOL(bio_reset);
 310
 311static struct bio *__bio_chain_endio(struct bio *bio)
 312{
 313	struct bio *parent = bio->bi_private;
 314
 315	if (!parent->bi_status)
 316		parent->bi_status = bio->bi_status;
 317	bio_put(bio);
 318	return parent;
 319}
 320
 321static void bio_chain_endio(struct bio *bio)
 322{
 323	bio_endio(__bio_chain_endio(bio));
 324}
 325
 326/**
 327 * bio_chain - chain bio completions
 328 * @bio: the target bio
 329 * @parent: the @bio's parent bio
 330 *
 331 * The caller won't have a bi_end_io called when @bio completes - instead,
 332 * @parent's bi_end_io won't be called until both @parent and @bio have
 333 * completed; the chained bio will also be freed when it completes.
 334 *
 335 * The caller must not set bi_private or bi_end_io in @bio.
 336 */
 337void bio_chain(struct bio *bio, struct bio *parent)
 338{
 339	BUG_ON(bio->bi_private || bio->bi_end_io);
 340
 341	bio->bi_private = parent;
 342	bio->bi_end_io	= bio_chain_endio;
 343	bio_inc_remaining(parent);
 344}
 345EXPORT_SYMBOL(bio_chain);
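
/*
 * Illustrative sketch, not part of bio.c: issuing an extra child bio whose
 * completion is folded into @parent, so parent->bi_end_io runs only once
 * both bios have finished.  The helper name is hypothetical.
 */
static void example_issue_child(struct bio *parent, struct bio *child)
{
	bio_chain(child, parent);	/* bumps parent->__bi_remaining */
	submit_bio(child);
	/*
	 * The caller continues to submit (or bio_endio()) @parent as usual;
	 * its completion now also waits for @child.
	 */
}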
 346
 347static void bio_alloc_rescue(struct work_struct *work)
 348{
 349	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 350	struct bio *bio;
 351
 352	while (1) {
 353		spin_lock(&bs->rescue_lock);
 354		bio = bio_list_pop(&bs->rescue_list);
 355		spin_unlock(&bs->rescue_lock);
 356
 357		if (!bio)
 358			break;
 359
 360		generic_make_request(bio);
 361	}
 362}
 363
 364static void punt_bios_to_rescuer(struct bio_set *bs)
 365{
 366	struct bio_list punt, nopunt;
 367	struct bio *bio;
 368
 369	if (WARN_ON_ONCE(!bs->rescue_workqueue))
 370		return;
 371	/*
 372	 * In order to guarantee forward progress we must punt only bios that
 373	 * were allocated from this bio_set; otherwise, if there was a bio on
 374	 * there for a stacking driver higher up in the stack, processing it
 375	 * could require allocating bios from this bio_set, and doing that from
 376	 * our own rescuer would be bad.
 377	 *
 378	 * Since bio lists are singly linked, pop them all instead of trying to
 379	 * remove from the middle of the list:
 380	 */
 381
 382	bio_list_init(&punt);
 383	bio_list_init(&nopunt);
 384
 385	while ((bio = bio_list_pop(&current->bio_list[0])))
 386		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 387	current->bio_list[0] = nopunt;
 388
 389	bio_list_init(&nopunt);
 390	while ((bio = bio_list_pop(&current->bio_list[1])))
 391		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 392	current->bio_list[1] = nopunt;
 393
 394	spin_lock(&bs->rescue_lock);
 395	bio_list_merge(&bs->rescue_list, &punt);
 396	spin_unlock(&bs->rescue_lock);
 397
 398	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 399}
 400
 401/**
 402 * bio_alloc_bioset - allocate a bio for I/O
 403 * @gfp_mask:   the GFP_* mask given to the slab allocator
 404 * @nr_iovecs:	number of iovecs to pre-allocate
 405 * @bs:		the bio_set to allocate from.
 406 *
 407 * Description:
 408 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 409 *   backed by the @bs's mempool.
 410 *
 411 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 412 *   always be able to allocate a bio. This is due to the mempool guarantees.
 413 *   To make this work, callers must never allocate more than 1 bio at a time
 414 *   from this pool. Callers that need to allocate more than 1 bio must always
 415 *   submit the previously allocated bio for IO before attempting to allocate
 416 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 417 *
 418 *   Note that when running under generic_make_request() (i.e. any block
 419 *   driver), bios are not submitted until after you return - see the code in
 420 *   generic_make_request() that converts recursion into iteration, to prevent
 421 *   stack overflows.
 422 *
 423 *   This would normally mean allocating multiple bios under
 424 *   generic_make_request() would be susceptible to deadlocks, but we have
 425 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 426 *   thread.
 427 *
 428 *   However, we do not guarantee forward progress for allocations from other
 429 *   mempools. Doing multiple allocations from the same mempool under
 430 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 431 *   for per bio allocations.
 432 *
 433 *   RETURNS:
 434 *   Pointer to new bio on success, NULL on failure.
 435 */
 436struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 
 437			     struct bio_set *bs)
 438{
 439	gfp_t saved_gfp = gfp_mask;
 440	unsigned front_pad;
 441	unsigned inline_vecs;
 442	struct bio_vec *bvl = NULL;
 443	struct bio *bio;
 444	void *p;
 445
 446	if (!bs) {
 447		if (nr_iovecs > UIO_MAXIOV)
 448			return NULL;
 449
 450		p = kmalloc(sizeof(struct bio) +
 451			    nr_iovecs * sizeof(struct bio_vec),
 452			    gfp_mask);
 453		front_pad = 0;
 454		inline_vecs = nr_iovecs;
 455	} else {
 456		/* should not use nobvec bioset for nr_iovecs > 0 */
 457		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
 458			return NULL;
 459		/*
 460		 * generic_make_request() converts recursion to iteration; this
 461		 * means if we're running beneath it, any bios we allocate and
 462		 * submit will not be submitted (and thus freed) until after we
 463		 * return.
 464		 *
 465		 * This exposes us to a potential deadlock if we allocate
 466		 * multiple bios from the same bio_set() while running
 467		 * underneath generic_make_request(). If we were to allocate
 468		 * multiple bios (say a stacking block driver that was splitting
 469		 * bios), we would deadlock if we exhausted the mempool's
 470		 * reserve.
 471		 *
 472		 * We solve this, and guarantee forward progress, with a rescuer
 473		 * workqueue per bio_set. If we go to allocate and there are
 474		 * bios on current->bio_list, we first try the allocation
 475		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
 476		 * bios we would be blocking to the rescuer workqueue before
 477		 * we retry with the original gfp_flags.
 478		 */
 479
 480		if (current->bio_list &&
 481		    (!bio_list_empty(&current->bio_list[0]) ||
 482		     !bio_list_empty(&current->bio_list[1])) &&
 483		    bs->rescue_workqueue)
 484			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 485
 486		p = mempool_alloc(bs->bio_pool, gfp_mask);
 487		if (!p && gfp_mask != saved_gfp) {
 488			punt_bios_to_rescuer(bs);
 489			gfp_mask = saved_gfp;
 490			p = mempool_alloc(bs->bio_pool, gfp_mask);
 491		}
 
 492
 493		front_pad = bs->front_pad;
 494		inline_vecs = BIO_INLINE_VECS;
 495	}
 496
 497	if (unlikely(!p))
 498		return NULL;
 
 
 499
 500	bio = p + front_pad;
 501	bio_init(bio, NULL, 0);
 502
 503	if (nr_iovecs > inline_vecs) {
 504		unsigned long idx = 0;
 505
 506		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
 507		if (!bvl && gfp_mask != saved_gfp) {
 508			punt_bios_to_rescuer(bs);
 509			gfp_mask = saved_gfp;
 510			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
 511		}
 512
 513		if (unlikely(!bvl))
 514			goto err_free;
 515
 516		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
 517	} else if (nr_iovecs) {
 518		bvl = bio->bi_inline_vecs;
 
 
 519	}
 520
 521	bio->bi_pool = bs;
 522	bio->bi_max_vecs = nr_iovecs;
 523	bio->bi_io_vec = bvl;
 524	return bio;
 525
 526err_free:
 527	mempool_free(p, bs->bio_pool);
 528	return NULL;
 529}
 530EXPORT_SYMBOL(bio_alloc_bioset);
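
/*
 * Illustrative sketch, not part of bio.c: the allocation discipline the
 * kernel-doc above demands - with __GFP_DIRECT_RECLAIM and a mempool-backed
 * bio_set, never hold more than one unsubmitted bio per pool.  The helper
 * name and its bio_set argument are hypothetical.
 */
static void example_write_page(struct block_device *bdev, sector_t sector,
			       struct page *page, struct bio_set *bs)
{
	/* cannot fail: GFP_NOIO includes __GFP_DIRECT_RECLAIM */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	/* submit before any further allocation from the same bio_set */
	submit_bio(bio);
}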
 531
 532void zero_fill_bio(struct bio *bio)
 533{
 534	unsigned long flags;
 535	struct bio_vec bv;
 536	struct bvec_iter iter;
 537
 538	bio_for_each_segment(bv, bio, iter) {
 539		char *data = bvec_kmap_irq(&bv, &flags);
 540		memset(data, 0, bv.bv_len);
 541		flush_dcache_page(bv.bv_page);
 542		bvec_kunmap_irq(data, &flags);
 543	}
 544}
 545EXPORT_SYMBOL(zero_fill_bio);
 546
 547/**
 548 * bio_put - release a reference to a bio
 549 * @bio:   bio to release reference to
 550 *
 551 * Description:
 552 *   Put a reference to a &struct bio, either one you have gotten with
 553 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 554 **/
 555void bio_put(struct bio *bio)
 556{
 557	if (!bio_flagged(bio, BIO_REFFED))
 558		bio_free(bio);
 559	else {
 560		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
 561
 562		/*
 563		 * last put frees it
 564		 */
 565		if (atomic_dec_and_test(&bio->__bi_cnt))
 566			bio_free(bio);
 567	}
 568}
 569EXPORT_SYMBOL(bio_put);
 570
 571inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 572{
 573	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 574		blk_recount_segments(q, bio);
 
 575
 576	return bio->bi_phys_segments;
 577}
 578EXPORT_SYMBOL(bio_phys_segments);
 579
 580/**
 581 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 582 * 	@bio: destination bio
 583 * 	@bio_src: bio to clone
 
 
 584 *
 585 *	Clone a &bio. Caller will own the returned bio, but not
 586 *	the actual data it points to. Reference count of returned
 587 * 	bio will be one.
 588 *
 589 * 	Caller must ensure that @bio_src is not freed before @bio.
 590 */
 591void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 
 592{
 593	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 594
 595	/*
 596	 * most users will be overriding ->bi_disk with a new target,
 597	 * so we don't set nor calculate new physical/hw segment counts here
 598	 */
 599	bio->bi_disk = bio_src->bi_disk;
 600	bio->bi_partno = bio_src->bi_partno;
 601	bio_set_flag(bio, BIO_CLONED);
 602	if (bio_flagged(bio_src, BIO_THROTTLED))
 603		bio_set_flag(bio, BIO_THROTTLED);
 604	bio->bi_opf = bio_src->bi_opf;
 605	bio->bi_write_hint = bio_src->bi_write_hint;
 606	bio->bi_iter = bio_src->bi_iter;
 607	bio->bi_io_vec = bio_src->bi_io_vec;
 608
 609	bio_clone_blkcg_association(bio, bio_src);
 610}
 611EXPORT_SYMBOL(__bio_clone_fast);
 612
 613/**
 614 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 615 *	@bio: bio to clone
 616 *	@gfp_mask: allocation priority
 617 *	@bs: bio_set to allocate from
 
 618 *
  619 * 	Like __bio_clone_fast(), but also allocates the returned bio
 620 */
 621struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 
 622{
 623	struct bio *b;
 624
 625	b = bio_alloc_bioset(gfp_mask, 0, bs);
 626	if (!b)
 627		return NULL;
 628
 629	__bio_clone_fast(b, bio);
 630
 631	if (bio_integrity(bio)) {
 632		int ret;
 633
 634		ret = bio_integrity_clone(b, bio, gfp_mask);
 635
 636		if (ret < 0) {
 637			bio_put(b);
 638			return NULL;
 639		}
 640	}
 641
 642	return b;
 643}
 644EXPORT_SYMBOL(bio_clone_fast);
 645
 646/**
 647 * 	bio_clone_bioset - clone a bio
 648 * 	@bio_src: bio to clone
 649 *	@gfp_mask: allocation priority
 650 *	@bs: bio_set to allocate from
 651 *
 652 *	Clone bio. Caller will own the returned bio, but not the actual data it
 653 *	points to. Reference count of returned bio will be one.
 
 654 */
 655struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 656			     struct bio_set *bs)
 657{
 658	struct bvec_iter iter;
 659	struct bio_vec bv;
 660	struct bio *bio;
 661
 662	/*
 663	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
 664	 * bio_src->bi_io_vec to bio->bi_io_vec.
 665	 *
 666	 * We can't do that anymore, because:
 667	 *
 668	 *  - The point of cloning the biovec is to produce a bio with a biovec
 669	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
 670	 *
 671	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
 672	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
 673	 *    But the clone should succeed as long as the number of biovecs we
 674	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
 675	 *
 676	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
 677	 *    that does not own the bio - reason being drivers don't use it for
 678	 *    iterating over the biovec anymore, so expecting it to be kept up
 679	 *    to date (i.e. for clones that share the parent biovec) is just
 680	 *    asking for trouble and would force extra work on
 681	 *    __bio_clone_fast() anyways.
 682	 */
 683
 684	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
 685	if (!bio)
 686		return NULL;
 687	bio->bi_disk		= bio_src->bi_disk;
 688	bio->bi_opf		= bio_src->bi_opf;
 689	bio->bi_write_hint	= bio_src->bi_write_hint;
 690	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 691	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
 692
 693	switch (bio_op(bio)) {
 694	case REQ_OP_DISCARD:
 695	case REQ_OP_SECURE_ERASE:
 696	case REQ_OP_WRITE_ZEROES:
 697		break;
 698	case REQ_OP_WRITE_SAME:
 699		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
 700		break;
 701	default:
 702		bio_for_each_segment(bv, bio_src, iter)
 703			bio->bi_io_vec[bio->bi_vcnt++] = bv;
 704		break;
 705	}
 706
 707	if (bio_integrity(bio_src)) {
 708		int ret;
 709
 710		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
 711		if (ret < 0) {
 712			bio_put(bio);
 713			return NULL;
 714		}
 715	}
 
 
 716
 717	bio_clone_blkcg_association(bio, bio_src);
 718
 719	return bio;
 720}
 721EXPORT_SYMBOL(bio_clone_bioset);
 722
 723/**
 724 *	bio_add_pc_page	-	attempt to add page to bio
 725 *	@q: the target queue
 726 *	@bio: destination bio
 727 *	@page: page to add
 728 *	@len: vec entry length
 729 *	@offset: vec entry offset
 730 *
 731 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 732 *	number of reasons, such as the bio being full or target block device
 733 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 734 *	so it is always possible to add a single page to an empty bio.
 735 *
 736 *	This should only be used by REQ_PC bios.
 737 */
 738int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 739		    *page, unsigned int len, unsigned int offset)
 740{
 741	int retried_segments = 0;
 742	struct bio_vec *bvec;
 743
 744	/*
 745	 * cloned bio must not modify vec list
 746	 */
 747	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 748		return 0;
 749
 750	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 751		return 0;
 752
 753	/*
 754	 * For filesystems with a blocksize smaller than the pagesize
 755	 * we will often be called with the same page as last time and
 756	 * a consecutive offset.  Optimize this special case.
 757	 */
 758	if (bio->bi_vcnt > 0) {
 759		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
 760
 761		if (page == prev->bv_page &&
 762		    offset == prev->bv_offset + prev->bv_len) {
 763			prev->bv_len += len;
 764			bio->bi_iter.bi_size += len;
 765			goto done;
 766		}
 767
 768		/*
 769		 * If the queue doesn't support SG gaps and adding this
 770		 * offset would create a gap, disallow it.
 771		 */
 772		if (bvec_gap_to_prev(q, prev, offset))
 
 773			return 0;
 774	}
 775
 776	if (bio->bi_vcnt >= bio->bi_max_vecs)
 
 
 
 777		return 0;
 778
 779	/*
 780	 * setup the new entry, we might clear it again later if we
 781	 * cannot add the page
 782	 */
 783	bvec = &bio->bi_io_vec[bio->bi_vcnt];
 784	bvec->bv_page = page;
 785	bvec->bv_len = len;
 786	bvec->bv_offset = offset;
 787	bio->bi_vcnt++;
 788	bio->bi_phys_segments++;
 789	bio->bi_iter.bi_size += len;
 790
 791	/*
 792	 * Perform a recount if the number of segments is greater
 793	 * than queue_max_segments(q).
 794	 */
 795
 796	while (bio->bi_phys_segments > queue_max_segments(q)) {
 797
 798		if (retried_segments)
 799			goto failed;
 800
 801		retried_segments = 1;
 802		blk_recount_segments(q, bio);
 803	}
 804
 805	/* If we may be able to merge these biovecs, force a recount */
 806	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
 807		bio_clear_flag(bio, BIO_SEG_VALID);
 808
 809 done:
 810	return len;
 
 811
 812 failed:
 813	bvec->bv_page = NULL;
 814	bvec->bv_len = 0;
 815	bvec->bv_offset = 0;
 816	bio->bi_vcnt--;
 817	bio->bi_iter.bi_size -= len;
 818	blk_recount_segments(q, bio);
 819	return 0;
 820}
 821EXPORT_SYMBOL(bio_add_pc_page);
 822
 823/**
 824 *	bio_add_page	-	attempt to add page to bio
 825 *	@bio: destination bio
 826 *	@page: page to add
 827 *	@len: vec entry length
 828 *	@offset: vec entry offset
 829 *
 830 *	Attempt to add a page to the bio_vec maplist. This will only fail
 831 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 832 */
 833int bio_add_page(struct bio *bio, struct page *page,
 834		 unsigned int len, unsigned int offset)
 835{
 836	struct bio_vec *bv;
 
 837
 838	/*
 839	 * cloned bio must not modify vec list
 840	 */
 841	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 842		return 0;
 843
 844	/*
 845	 * For filesystems with a blocksize smaller than the pagesize
 846	 * we will often be called with the same page as last time and
 847	 * a consecutive offset.  Optimize this special case.
 848	 */
 849	if (bio->bi_vcnt > 0) {
 850		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 851
 852		if (page == bv->bv_page &&
 853		    offset == bv->bv_offset + bv->bv_len) {
 854			bv->bv_len += len;
 855			goto done;
 856		}
 857	}
 858
 859	if (bio->bi_vcnt >= bio->bi_max_vecs)
 860		return 0;
 861
 862	bv		= &bio->bi_io_vec[bio->bi_vcnt];
 863	bv->bv_page	= page;
 864	bv->bv_len	= len;
 865	bv->bv_offset	= offset;
 866
 867	bio->bi_vcnt++;
 868done:
 869	bio->bi_iter.bi_size += len;
 870	return len;
 871}
 872EXPORT_SYMBOL(bio_add_page);
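
/*
 * Illustrative sketch, not part of bio.c: bio_add_page() returns the number
 * of bytes added (all or nothing here), so callers stop filling once the
 * bio is full.  The helper name is hypothetical.
 */
static int example_fill_bio(struct bio *bio, struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* bi_vcnt hit bi_max_vecs: submit, retry */

	return i;	/* number of pages actually added */
}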
 873
 874/**
 875 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 876 * @bio: bio to add pages to
 877 * @iter: iov iterator describing the region to be mapped
 
 
 878 *
 879 * Pins as many pages from *iter as fit and appends them to @bio's bvec array. The
 880 * pages will have to be released using put_page() when done.
 881 */
 882int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 
 883{
 884	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
 885	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 886	struct page **pages = (struct page **)bv;
 887	size_t offset, diff;
 888	ssize_t size;
 889
 890	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 891	if (unlikely(size <= 0))
 892		return size ? size : -EFAULT;
 893	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 894
 895	/*
 896	 * Deep magic below:  We need to walk the pinned pages backwards
 897	 * because we are abusing the space allocated for the bio_vecs
 898	 * for the page array.  Because the bio_vecs are larger than the
 899	 * page pointers by definition this will always work.  But it also
  900 * means we can't use bio_add_page, so any changes to its semantics
 901	 * need to be reflected here as well.
 902	 */
 903	bio->bi_iter.bi_size += size;
 904	bio->bi_vcnt += nr_pages;
 905
 906	diff = (nr_pages * PAGE_SIZE - offset) - size;
 907	while (nr_pages--) {
 908		bv[nr_pages].bv_page = pages[nr_pages];
 909		bv[nr_pages].bv_len = PAGE_SIZE;
 910		bv[nr_pages].bv_offset = 0;
 911	}
 912
 913	bv[0].bv_offset += offset;
 914	bv[0].bv_len -= offset;
 915	if (diff)
 916		bv[bio->bi_vcnt - 1].bv_len -= diff;
 917
 918	iov_iter_advance(iter, size);
 919	return 0;
 920}
 921EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 922
 923static void submit_bio_wait_endio(struct bio *bio)
 924{
 925	complete(bio->bi_private);
 926}
 
 927
 928/**
 929 * submit_bio_wait - submit a bio, and wait until it completes
 930 * @bio: The &struct bio which describes the I/O
 
 
 
 931 *
 932 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 933 * bio_endio() on failure.
 934 *
  935 * WARNING: Unlike how submit_bio() is usually used, this function does not
  936 * consume the bio reference. The caller must drop the reference
  937 * on their own.
 938 */
 939int submit_bio_wait(struct bio *bio)
 
 940{
 941	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
 942
 943	bio->bi_private = &done;
 944	bio->bi_end_io = submit_bio_wait_endio;
 945	bio->bi_opf |= REQ_SYNC;
 946	submit_bio(bio);
 947	wait_for_completion_io(&done);
 948
 949	return blk_status_to_errno(bio->bi_status);
 950}
 951EXPORT_SYMBOL(submit_bio_wait);
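
/*
 * Illustrative sketch, not part of bio.c: a synchronous single-page read
 * built on submit_bio_wait().  Per the WARNING above, the caller still owns
 * the bio reference and must drop it.  The helper name is hypothetical.
 */
static int example_read_page_sync(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);		/* submit_bio_wait() did not consume it */
	return ret;
}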
 952
 953/**
 954 * bio_advance - increment/complete a bio by some number of bytes
 955 * @bio:	bio to advance
 956 * @bytes:	number of bytes to complete
 
 
 957 *
 958 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 959 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 960 * be updated on the last bvec as well.
 
 961 *
 962 * @bio will then represent the remaining, uncompleted portion of the io.
 963 */
 964void bio_advance(struct bio *bio, unsigned bytes)
 
 965{
 966	if (bio_integrity(bio))
 967		bio_integrity_advance(bio, bytes);
 968
 969	bio_advance_iter(bio, &bio->bi_iter, bytes);
 970}
 971EXPORT_SYMBOL(bio_advance);
 972
 973/**
 974 * bio_copy_data - copy contents of data buffers from one chain of bios to
 975 * another
 976 * @src: source bio list
 977 * @dst: destination bio list
 978 *
 979 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 980 * @src and @dst as linked lists of bios.
 981 *
 982 * Stops when it reaches the end of either @src or @dst - that is, copies
 983 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 984 */
 985void bio_copy_data(struct bio *dst, struct bio *src)
 986{
 987	struct bvec_iter src_iter, dst_iter;
 988	struct bio_vec src_bv, dst_bv;
 989	void *src_p, *dst_p;
 990	unsigned bytes;
 991
 992	src_iter = src->bi_iter;
 993	dst_iter = dst->bi_iter;
 994
 995	while (1) {
 996		if (!src_iter.bi_size) {
 997			src = src->bi_next;
 998			if (!src)
 999				break;
1000
1001			src_iter = src->bi_iter;
1002		}
1003
1004		if (!dst_iter.bi_size) {
1005			dst = dst->bi_next;
1006			if (!dst)
1007				break;
1008
1009			dst_iter = dst->bi_iter;
1010		}
1011
1012		src_bv = bio_iter_iovec(src, src_iter);
1013		dst_bv = bio_iter_iovec(dst, dst_iter);
1014
1015		bytes = min(src_bv.bv_len, dst_bv.bv_len);
1016
1017		src_p = kmap_atomic(src_bv.bv_page);
1018		dst_p = kmap_atomic(dst_bv.bv_page);
1019
1020		memcpy(dst_p + dst_bv.bv_offset,
1021		       src_p + src_bv.bv_offset,
1022		       bytes);
1023
1024		kunmap_atomic(dst_p);
1025		kunmap_atomic(src_p);
1026
1027		bio_advance_iter(src, &src_iter, bytes);
1028		bio_advance_iter(dst, &dst_iter, bytes);
1029	}
1030}
1031EXPORT_SYMBOL(bio_copy_data);
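
/*
 * Illustrative sketch, not part of bio.c: bouncing a bio's payload into
 * freshly allocated pages, as stacking drivers do before modifying data in
 * flight.  Unwinding uses bio_free_pages(); the caller still sets the
 * device, sector and op on the copy.  The helper name is hypothetical.
 */
static struct bio *example_bounce(struct bio *src, struct bio_set *bs)
{
	struct bio *dst = bio_alloc_bioset(GFP_NOIO, bio_segments(src), bs);
	unsigned int left = src->bi_iter.bi_size;

	if (!dst)
		return NULL;

	while (left) {
		unsigned int len = min_t(unsigned int, left, PAGE_SIZE);
		struct page *page = alloc_page(GFP_NOIO);

		if (!page || bio_add_page(dst, page, len, 0) != len) {
			if (page)
				__free_page(page);
			bio_free_pages(dst);
			bio_put(dst);
			return NULL;
		}
		left -= len;
	}

	bio_copy_data(dst, src);	/* dst now holds a private copy */
	return dst;
}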
1032
1033struct bio_map_data {
1034	int is_our_pages;
1035	struct iov_iter iter;
1036	struct iovec iov[];
1037};
1038
1039static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1040					       gfp_t gfp_mask)
1041{
1042	struct bio_map_data *bmd;
1043	if (data->nr_segs > UIO_MAXIOV)
1044		return NULL;
1045
1046	bmd = kmalloc(sizeof(struct bio_map_data) +
1047		       sizeof(struct iovec) * data->nr_segs, gfp_mask);
1048	if (!bmd)
1049		return NULL;
1050	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
1051	bmd->iter = *data;
1052	bmd->iter.iov = bmd->iov;
1053	return bmd;
1054}
1055
1056/**
1057 * bio_copy_from_iter - copy all pages from iov_iter to bio
1058 * @bio: The &struct bio which describes the I/O as destination
1059 * @iter: iov_iter as source
1060 *
1061 * Copy all pages from iov_iter to bio.
1062 * Returns 0 on success, or error on failure.
1063 */
1064static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1065{
1066	int i;
1067	struct bio_vec *bvec;
1068
1069	bio_for_each_segment_all(bvec, bio, i) {
1070		ssize_t ret;
1071
1072		ret = copy_page_from_iter(bvec->bv_page,
1073					  bvec->bv_offset,
1074					  bvec->bv_len,
1075					  iter);
1076
1077		if (!iov_iter_count(iter))
1078			break;
1079
1080		if (ret < bvec->bv_len)
1081			return -EFAULT;
1082	}
1083
1084	return 0;
1085}
1086
1087/**
1088 * bio_copy_to_iter - copy all pages from bio to iov_iter
1089 * @bio: The &struct bio which describes the I/O as source
1090 * @iter: iov_iter as destination
1091 *
1092 * Copy all pages from bio to iov_iter.
1093 * Returns 0 on success, or error on failure.
1094 */
1095static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1096{
1097	int i;
1098	struct bio_vec *bvec;
1099
1100	bio_for_each_segment_all(bvec, bio, i) {
1101		ssize_t ret;
1102
1103		ret = copy_page_to_iter(bvec->bv_page,
1104					bvec->bv_offset,
1105					bvec->bv_len,
1106					&iter);
1107
1108		if (!iov_iter_count(&iter))
1109			break;
1110
1111		if (ret < bvec->bv_len)
1112			return -EFAULT;
1113	}
1114
 
 
1115	return 0;
1116}
1117
1118void bio_free_pages(struct bio *bio)
 
1119{
1120	struct bio_vec *bvec;
1121	int i;
1122
1123	bio_for_each_segment_all(bvec, bio, i)
1124		__free_page(bvec->bv_page);
1125}
1126EXPORT_SYMBOL(bio_free_pages);
 
1127
1128/**
1129 *	bio_uncopy_user	-	finish previously mapped bio
1130 *	@bio: bio being terminated
 
1131 *
1132 *	Free pages allocated from bio_copy_user_iov() and write back data
1133 *	to user space in case of a read.
 
 
1134 */
1135int bio_uncopy_user(struct bio *bio)
1136{
1137	struct bio_map_data *bmd = bio->bi_private;
1138	int ret = 0;
1139
1140	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1141		/*
1142		 * if we're in a workqueue, the request is orphaned, so
1143		 * don't copy into a random user address space, just free
1144		 * and return -EINTR so user space doesn't expect any data.
1145		 */
1146		if (!current->mm)
1147			ret = -EINTR;
1148		else if (bio_data_dir(bio) == READ)
1149			ret = bio_copy_to_iter(bio, bmd->iter);
1150		if (bmd->is_our_pages)
1151			bio_free_pages(bio);
1152	}
1153	kfree(bmd);
1154	bio_put(bio);
1155	return ret;
1156}
1157
1158/**
1159 *	bio_copy_user_iov	-	copy user data to bio
1160 *	@q:		destination block queue
1161 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
1162 *	@iter:		iovec iterator
1163 *	@gfp_mask:	memory allocation flags
1164 *
1165 *	Prepares and returns a bio for indirect user io, bouncing data
1166 *	to/from kernel pages as necessary. Must be paired with
1167 *	a call to bio_uncopy_user() on io completion.
1168 */
1169struct bio *bio_copy_user_iov(struct request_queue *q,
1170			      struct rq_map_data *map_data,
1171			      struct iov_iter *iter,
1172			      gfp_t gfp_mask)
1173{
1174	struct bio_map_data *bmd;
1175	struct page *page;
1176	struct bio *bio;
1177	int i = 0, ret;
1178	int nr_pages;
1179	unsigned int len = iter->count;
1180	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1181
1182	bmd = bio_alloc_map_data(iter, gfp_mask);
1183	if (!bmd)
1184		return ERR_PTR(-ENOMEM);
1185
1186	/*
1187	 * We need to do a deep copy of the iov_iter including the iovecs.
1188	 * The caller provided iov might point to an on-stack or otherwise
1189	 * shortlived one.
 
 
1190	 */
1191	bmd->is_our_pages = map_data ? 0 : 1;
1192
1193	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1194	if (nr_pages > BIO_MAX_PAGES)
1195		nr_pages = BIO_MAX_PAGES;
1196
1197	ret = -ENOMEM;
1198	bio = bio_kmalloc(gfp_mask, nr_pages);
1199	if (!bio)
1200		goto out_bmd;
1201
1202	ret = 0;
 
1203
1204	if (map_data) {
1205		nr_pages = 1 << map_data->page_order;
1206		i = map_data->offset / PAGE_SIZE;
 
1207	}
1208	while (len) {
1209		unsigned int bytes = PAGE_SIZE;
1210
1211		bytes -= offset;
1212
1213		if (bytes > len)
1214			bytes = len;
1215
1216		if (map_data) {
1217			if (i == map_data->nr_entries * nr_pages) {
1218				ret = -ENOMEM;
1219				break;
1220			}
1221
1222			page = map_data->pages[i / nr_pages];
1223			page += (i % nr_pages);
1224
1225			i++;
1226		} else {
1227			page = alloc_page(q->bounce_gfp | gfp_mask);
1228			if (!page) {
1229				ret = -ENOMEM;
1230				break;
1231			}
1232		}
1233
1234		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1235			break;
1236
1237		len -= bytes;
1238		offset = 0;
1239	}
1240
1241	if (ret)
1242		goto cleanup;
 
 
1243
1244	if (map_data)
1245		map_data->offset += bio->bi_iter.bi_size;
1246
1247	/*
1248	 * success
1249	 */
1250	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
1251	    (map_data && map_data->from_user)) {
1252		ret = bio_copy_from_iter(bio, iter);
1253		if (ret)
1254			goto cleanup;
1255	} else {
1256		iov_iter_advance(iter, bio->bi_iter.bi_size);
1257	}
1258
1259	bio->bi_private = bmd;
1260	if (map_data && map_data->null_mapped)
1261		bio_set_flag(bio, BIO_NULL_MAPPED);
1262	return bio;
1263cleanup:
1264	if (!map_data)
1265		bio_free_pages(bio);
1266	bio_put(bio);
1267out_bmd:
1268	kfree(bmd);
1269	return ERR_PTR(ret);
1270}
1271
1272/**
1273 *	bio_map_user_iov - map user iovec into bio
1274 *	@q:		the struct request_queue for the bio
1275 *	@iter:		iovec iterator
1276 *	@gfp_mask:	memory allocation flags
1277 *
1278 *	Map the user space address into a bio suitable for io to a block
1279 *	device. Returns an error pointer in case of error.
1280 */
1281struct bio *bio_map_user_iov(struct request_queue *q,
1282			     struct iov_iter *iter,
1283			     gfp_t gfp_mask)
1284{
1285	int j;
1286	struct bio *bio;
1287	int ret;
1288	struct bio_vec *bvec;
1289
1290	if (!iov_iter_count(iter))
1291		return ERR_PTR(-EINVAL);
1292
1293	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1294	if (!bio)
1295		return ERR_PTR(-ENOMEM);
1296
1297	while (iov_iter_count(iter)) {
1298		struct page **pages;
1299		ssize_t bytes;
1300		size_t offs, added = 0;
1301		int npages;
1302
1303		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1304		if (unlikely(bytes <= 0)) {
1305			ret = bytes ? bytes : -EFAULT;
1306			goto out_unmap;
1307		}
1308
1309		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1310
1311		if (unlikely(offs & queue_dma_alignment(q))) {
1312			ret = -EINVAL;
1313			j = 0;
1314		} else {
1315			for (j = 0; j < npages; j++) {
1316				struct page *page = pages[j];
1317				unsigned int n = PAGE_SIZE - offs;
1318				unsigned short prev_bi_vcnt = bio->bi_vcnt;
1319
1320				if (n > bytes)
1321					n = bytes;
1322
1323				if (!bio_add_pc_page(q, bio, page, n, offs))
1324					break;
1325
1326				/*
1327				 * check if vector was merged with previous
1328				 * drop page reference if needed
1329				 */
1330				if (bio->bi_vcnt == prev_bi_vcnt)
1331					put_page(page);
1332
1333				added += n;
1334				bytes -= n;
1335				offs = 0;
1336			}
1337			iov_iter_advance(iter, added);
1338		}
1339		/*
1340		 * release the pages we didn't map into the bio, if any
1341		 */
1342		while (j < npages)
1343			put_page(pages[j++]);
1344		kvfree(pages);
1345		/* couldn't stuff something into bio? */
1346		if (bytes)
1347			break;
1348	}
1349
1350	bio_set_flag(bio, BIO_USER_MAPPED);
1351
1352	/*
1353	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
1354	 * it would normally disappear when its bi_end_io is run.
1355	 * however, we need it for the unmap, so grab an extra
1356	 * reference to it
1357	 */
1358	bio_get(bio);
1359	return bio;
1360
1361 out_unmap:
1362	bio_for_each_segment_all(bvec, bio, j) {
1363		put_page(bvec->bv_page);
1364	}
1365	bio_put(bio);
1366	return ERR_PTR(ret);
1367}
 
1368
1369static void __bio_unmap_user(struct bio *bio)
1370{
1371	struct bio_vec *bvec;
1372	int i;
1373
1374	/*
1375	 * make sure we dirty pages we wrote to
1376	 */
1377	bio_for_each_segment_all(bvec, bio, i) {
1378		if (bio_data_dir(bio) == READ)
1379			set_page_dirty_lock(bvec->bv_page);
1380
1381		put_page(bvec->bv_page);
1382	}
1383
1384	bio_put(bio);
1385}
1386
1387/**
1388 *	bio_unmap_user	-	unmap a bio
1389 *	@bio:		the bio being unmapped
1390 *
1391 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
1392 *	process context.
1393 *
1394 *	bio_unmap_user() may sleep.
 
 
1395 */
1396void bio_unmap_user(struct bio *bio)
1397{
1398	__bio_unmap_user(bio);
1399	bio_put(bio);
1400}
1401
1402static void bio_map_kern_endio(struct bio *bio)
1403{
1404	bio_put(bio);
1405}
1406
1407/**
1408 *	bio_map_kern	-	map kernel address into bio
1409 *	@q: the struct request_queue for the bio
1410 *	@data: pointer to buffer to map
1411 *	@len: length in bytes
1412 *	@gfp_mask: allocation flags for bio allocation
1413 *
1414 *	Map the kernel address into a bio suitable for io to a block
1415 *	device. Returns an error pointer in case of error.
1416 */
1417struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1418			 gfp_t gfp_mask)
1419{
1420	unsigned long kaddr = (unsigned long)data;
1421	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1422	unsigned long start = kaddr >> PAGE_SHIFT;
1423	const int nr_pages = end - start;
1424	int offset, i;
1425	struct bio *bio;
1426
1427	bio = bio_kmalloc(gfp_mask, nr_pages);
1428	if (!bio)
1429		return ERR_PTR(-ENOMEM);
1430
1431	offset = offset_in_page(kaddr);
1432	for (i = 0; i < nr_pages; i++) {
1433		unsigned int bytes = PAGE_SIZE - offset;
 
1434
1435		if (len <= 0)
1436			break;
1437
1438		if (bytes > len)
1439			bytes = len;
1440
1441		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1442				    offset) < bytes) {
1443			/* we don't support partial mappings */
1444			bio_put(bio);
1445			return ERR_PTR(-EINVAL);
1446		}
1447
1448		data += bytes;
1449		len -= bytes;
1450		offset = 0;
1451	}
1452
1453	bio->bi_end_io = bio_map_kern_endio;
1454	return bio;
1455}
1456EXPORT_SYMBOL(bio_map_kern);
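
/*
 * Illustrative sketch, not part of bio.c: writing a kernel buffer through
 * bio_map_kern().  Note that submit_bio_wait() replaces the bi_end_io set
 * above, so the reference is dropped explicitly here.  The helper name is
 * hypothetical, and @buf must stay alive until completion.
 */
static int example_write_buf(struct request_queue *q,
			     struct block_device *bdev, sector_t sector,
			     void *buf, unsigned int len)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	int ret;

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}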
1457
1458static void bio_copy_kern_endio(struct bio *bio)
 
1459{
1460	bio_free_pages(bio);
1461	bio_put(bio);
1462}
1463
1464static void bio_copy_kern_endio_read(struct bio *bio)
1465{
1466	char *p = bio->bi_private;
1467	struct bio_vec *bvec;
1468	int i;
1469
1470	bio_for_each_segment_all(bvec, bio, i) {
1471		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1472		p += bvec->bv_len;
1473	}
1474
1475	bio_copy_kern_endio(bio);
1476}
 
1477
1478/**
1479 *	bio_copy_kern	-	copy kernel address into bio
1480 *	@q: the struct request_queue for the bio
1481 *	@data: pointer to buffer to copy
1482 *	@len: length in bytes
1483 *	@gfp_mask: allocation flags for bio and page allocation
1484 *	@reading: data direction is READ
1485 *
1486 *	copy the kernel address into a bio suitable for io to a block
1487 *	device. Returns an error pointer in case of error.
1488 */
1489struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1490			  gfp_t gfp_mask, int reading)
1491{
1492	unsigned long kaddr = (unsigned long)data;
1493	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1494	unsigned long start = kaddr >> PAGE_SHIFT;
1495	struct bio *bio;
1496	void *p = data;
1497	int nr_pages = 0;
1498
1499	/*
1500	 * Overflow, abort
1501	 */
1502	if (end < start)
1503		return ERR_PTR(-EINVAL);
1504
1505	nr_pages = end - start;
1506	bio = bio_kmalloc(gfp_mask, nr_pages);
1507	if (!bio)
1508		return ERR_PTR(-ENOMEM);
1509
1510	while (len) {
1511		struct page *page;
1512		unsigned int bytes = PAGE_SIZE;
1513
1514		if (bytes > len)
1515			bytes = len;
1516
1517		page = alloc_page(q->bounce_gfp | gfp_mask);
1518		if (!page)
1519			goto cleanup;
1520
1521		if (!reading)
1522			memcpy(page_address(page), p, bytes);
1523
1524		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1525			break;
1526
1527		len -= bytes;
1528		p += bytes;
1529	}
1530
1531	if (reading) {
1532		bio->bi_end_io = bio_copy_kern_endio_read;
1533		bio->bi_private = data;
1534	} else {
1535		bio->bi_end_io = bio_copy_kern_endio;
1536	}
1537
1538	return bio;
1539
1540cleanup:
1541	bio_free_pages(bio);
1542	bio_put(bio);
1543	return ERR_PTR(-ENOMEM);
1544}
 
1545
1546/*
1547 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1548 * for performing direct-IO in BIOs.
1549 *
1550 * The problem is that we cannot run set_page_dirty() from interrupt context
1551 * because the required locks are not interrupt-safe.  So what we can do is to
1552 * mark the pages dirty _before_ performing IO.  And in interrupt context,
1553 * check that the pages are still dirty.   If so, fine.  If not, redirty them
1554 * in process context.
1555 *
1556 * We special-case compound pages here: normally this means reads into hugetlb
1557 * pages.  The logic in here doesn't really work right for compound pages
1558 * because the VM does not uniformly chase down the head page in all cases.
1559 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1560 * handle them at all.  So we skip compound pages here at an early stage.
1561 *
1562 * Note that this code is very hard to test under normal circumstances because
1563 * direct-io pins the pages with get_user_pages().  This makes
1564 * is_page_cache_freeable return false, and the VM will not clean the pages.
1565 * But other code (eg, flusher threads) could clean the pages if they are mapped
1566 * pagecache.
1567 *
1568 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1569 * deferred bio dirtying paths.
1570 */
1571
1572/*
1573 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1574 */
1575void bio_set_pages_dirty(struct bio *bio)
1576{
1577	struct bio_vec *bvec;
1578	int i;
1579
1580	bio_for_each_segment_all(bvec, bio, i) {
1581		struct page *page = bvec->bv_page;
1582
1583		if (page && !PageCompound(page))
1584			set_page_dirty_lock(page);
1585	}
1586}
1587
1588static void bio_release_pages(struct bio *bio)
1589{
1590	struct bio_vec *bvec;
1591	int i;
1592
1593	bio_for_each_segment_all(bvec, bio, i) {
1594		struct page *page = bvec->bv_page;
1595
1596		if (page)
1597			put_page(page);
1598	}
1599}
1600
1601/*
1602 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1603 * If they are, then fine.  If, however, some pages are clean then they must
1604 * have been written out during the direct-IO read.  So we take another ref on
1605 * the BIO and the offending pages and re-dirty the pages in process context.
1606 *
1607 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1608 * here on.  It will run one put_page() against each page and will run one
1609 * bio_put() against the BIO.
1610 */
1611
1612static void bio_dirty_fn(struct work_struct *work);
1613
1614static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1615static DEFINE_SPINLOCK(bio_dirty_lock);
1616static struct bio *bio_dirty_list;
1617
1618/*
1619 * This runs in process context
1620 */
1621static void bio_dirty_fn(struct work_struct *work)
1622{
1623	unsigned long flags;
1624	struct bio *bio;
1625
1626	spin_lock_irqsave(&bio_dirty_lock, flags);
1627	bio = bio_dirty_list;
1628	bio_dirty_list = NULL;
1629	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1630
1631	while (bio) {
1632		struct bio *next = bio->bi_private;
1633
1634		bio_set_pages_dirty(bio);
1635		bio_release_pages(bio);
1636		bio_put(bio);
1637		bio = next;
1638	}
1639}
1640
1641void bio_check_pages_dirty(struct bio *bio)
1642{
1643	struct bio_vec *bvec;
1644	int nr_clean_pages = 0;
1645	int i;
1646
1647	bio_for_each_segment_all(bvec, bio, i) {
1648		struct page *page = bvec->bv_page;
1649
1650		if (PageDirty(page) || PageCompound(page)) {
1651			put_page(page);
1652			bvec->bv_page = NULL;
1653		} else {
1654			nr_clean_pages++;
1655		}
1656	}
1657
1658	if (nr_clean_pages) {
1659		unsigned long flags;
1660
1661		spin_lock_irqsave(&bio_dirty_lock, flags);
1662		bio->bi_private = bio_dirty_list;
1663		bio_dirty_list = bio;
1664		spin_unlock_irqrestore(&bio_dirty_lock, flags);
1665		schedule_work(&bio_dirty_work);
1666	} else {
1667		bio_put(bio);
1668	}
1669}
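
/*
 * Illustrative sketch, not part of bio.c: the direct-IO completion
 * convention described in the comment block above - pages were dirtied
 * before submission, and reads are re-checked here.  The helper name is
 * hypothetical.
 */
static void example_dio_end_io(struct bio *bio)
{
	if (bio_data_dir(bio) == READ) {
		bio_check_pages_dirty(bio);	/* owns the bio from here */
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}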
1670
1671void generic_start_io_acct(struct request_queue *q, int rw,
1672			   unsigned long sectors, struct hd_struct *part)
1673{
1674	int cpu = part_stat_lock();
1675
1676	part_round_stats(q, cpu, part);
1677	part_stat_inc(cpu, part, ios[rw]);
1678	part_stat_add(cpu, part, sectors[rw], sectors);
1679	part_inc_in_flight(q, part, rw);
1680
1681	part_stat_unlock();
1682}
1683EXPORT_SYMBOL(generic_start_io_acct);
1684
1685void generic_end_io_acct(struct request_queue *q, int rw,
1686			 struct hd_struct *part, unsigned long start_time)
1687{
1688	unsigned long duration = jiffies - start_time;
1689	int cpu = part_stat_lock();
1690
1691	part_stat_add(cpu, part, ticks[rw], duration);
1692	part_round_stats(q, cpu, part);
1693	part_dec_in_flight(q, part, rw);
1694
1695	part_stat_unlock();
1696}
1697EXPORT_SYMBOL(generic_end_io_acct);
1698
1699#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1700void bio_flush_dcache_pages(struct bio *bi)
1701{
1702	struct bio_vec bvec;
1703	struct bvec_iter iter;
1704
1705	bio_for_each_segment(bvec, bi, iter)
1706		flush_dcache_page(bvec.bv_page);
1707}
1708EXPORT_SYMBOL(bio_flush_dcache_pages);
1709#endif
1710
1711static inline bool bio_remaining_done(struct bio *bio)
1712{
1713	/*
1714	 * If we're not chaining, then ->__bi_remaining is always 1 and
1715	 * we always end io on the first invocation.
1716	 */
1717	if (!bio_flagged(bio, BIO_CHAIN))
1718		return true;
1719
1720	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1721
1722	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1723		bio_clear_flag(bio, BIO_CHAIN);
1724		return true;
1725	}
1726
1727	return false;
1728}
1729
1730/**
1731 * bio_endio - end I/O on a bio
1732 * @bio:	bio
1733 *
1734 * Description:
1735 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1736 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1737 *   bio unless they own it and thus know that it has an end_io function.
1738 *
1739 *   bio_endio() can be called several times on a bio that has been chained
1740 *   using bio_chain().  The ->bi_end_io() function will only be called the
1741 *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1742 *   generated if BIO_TRACE_COMPLETION is set.
1743 **/
1744void bio_endio(struct bio *bio)
1745{
1746again:
1747	if (!bio_remaining_done(bio))
1748		return;
1749	if (!bio_integrity_endio(bio))
1750		return;
1751
1752	/*
1753	 * Need to have a real endio function for chained bios, otherwise
1754	 * various corner cases will break (like stacking block devices that
1755	 * save/restore bi_end_io) - however, we want to avoid unbounded
1756	 * recursion and blowing the stack. Tail call optimization would
1757	 * handle this, but compiling with frame pointers also disables
1758	 * gcc's sibling call optimization.
1759	 */
1760	if (bio->bi_end_io == bio_chain_endio) {
1761		bio = __bio_chain_endio(bio);
1762		goto again;
1763	}
1764
1765	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1766		trace_block_bio_complete(bio->bi_disk->queue, bio,
1767					 blk_status_to_errno(bio->bi_status));
1768		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1769	}
1770
1771	blk_throtl_bio_endio(bio);
1772	/* release cgroup info */
1773	bio_uninit(bio);
1774	if (bio->bi_end_io)
1775		bio->bi_end_io(bio);
1776}
1777EXPORT_SYMBOL(bio_endio);
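
/*
 * Illustrative sketch, not part of bio.c: a stacking-driver style end_io
 * that propagates a clone's status to the original bio and completes it,
 * per the rules in the kernel-doc above.  The bi_private convention and
 * helper name are hypothetical.
 */
static void example_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	if (!orig->bi_status)
		orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);	/* runs orig->bi_end_io exactly once */
}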
1778
1779/**
1780 * bio_split - split a bio
1781 * @bio:	bio to split
1782 * @sectors:	number of sectors to split from the front of @bio
1783 * @gfp:	gfp mask
1784 * @bs:		bio set to allocate from
1785 *
1786 * Allocates and returns a new bio which represents @sectors from the start of
1787 * @bio, and updates @bio to represent the remaining sectors.
1788 *
1789 * Unless this is a discard request the newly allocated bio will point
1790 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1791 * @bio is not freed before the split.
1792 */
1793struct bio *bio_split(struct bio *bio, int sectors,
1794		      gfp_t gfp, struct bio_set *bs)
1795{
1796	struct bio *split;
1797
1798	BUG_ON(sectors <= 0);
1799	BUG_ON(sectors >= bio_sectors(bio));
1800
1801	split = bio_clone_fast(bio, gfp, bs);
1802	if (!split)
1803		return NULL;
1804
1805	split->bi_iter.bi_size = sectors << 9;
1806
1807	if (bio_integrity(split))
1808		bio_integrity_trim(split);
1809
1810	bio_advance(bio, split->bi_iter.bi_size);
1811
1812	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1813		bio_set_flag(split, BIO_TRACE_COMPLETION);
1814
1815	return split;
1816}
1817EXPORT_SYMBOL(bio_split);
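
/*
 * Illustrative sketch, not part of bio.c: the usual split-then-chain loop.
 * Each piece peeled off the front is chained to the remainder, which keeps
 * the original bi_end_io.  "max_sectors" is a hypothetical per-device limit.
 */
static void example_split_and_submit(struct bio *bio, int max_sectors,
				     struct bio_set *bs)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		bio_chain(split, bio);	/* fold completion into @bio */
		generic_make_request(split);
	}
	generic_make_request(bio);
}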
1818
1819/**
1820 * bio_trim - trim a bio
1821 * @bio:	bio to trim
1822 * @offset:	number of sectors to trim from the front of @bio
1823 * @size:	size we want to trim @bio to, in sectors
1824 */
1825void bio_trim(struct bio *bio, int offset, int size)
1826{
1827	/* 'bio' is a cloned bio which we need to trim to match
1828	 * the given offset and size.
1829	 */
1830
1831	size <<= 9;
1832	if (offset == 0 && size == bio->bi_iter.bi_size)
1833		return;
1834
1835	bio_clear_flag(bio, BIO_SEG_VALID);
1836
1837	bio_advance(bio, offset << 9);
1838
1839	bio->bi_iter.bi_size = size;
1840
1841	if (bio_integrity(bio))
1842		bio_integrity_trim(bio);
1843
1844}
1845EXPORT_SYMBOL_GPL(bio_trim);
1846
1847/*
1848 * create memory pools for biovec's in a bio_set.
1849 * use the global biovec slabs created for general use.
1850 */
1851mempool_t *biovec_create_pool(int pool_entries)
1852{
1853	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1854
1855	return mempool_create_slab_pool(pool_entries, bp->slab);
1856}
1857
1858void bioset_free(struct bio_set *bs)
1859{
 
1860	if (bs->rescue_workqueue)
1861		destroy_workqueue(bs->rescue_workqueue);
 
1862
1863	mempool_destroy(bs->bio_pool);
1864	mempool_destroy(bs->bvec_pool);
1865
1866	bioset_integrity_free(bs);
1867	bio_put_slab(bs);
1868
1869	kfree(bs);
1870}
1871EXPORT_SYMBOL(bioset_free);
1872
1873/**
1874 * bioset_create  - Create a bio_set
 
1875 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1876 * @front_pad:	Number of bytes to allocate in front of the returned bio
1877 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1878 *              and %BIOSET_NEED_RESCUER
1879 *
1880 * Description:
1881 *    Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller
1882 *    to ask for a number of bytes to be allocated in front of the bio.
1883 *    Front pad allocation is useful for embedding the bio inside
1884 *    another structure, to avoid allocating extra data to go with the bio.
1885 *    Note that the bio must always be embedded at the END of that structure,
1886 *    or things will break badly.
1887 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1888 *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1889 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1890 *    dispatch queued bios when the mempool runs out of space.
1891 *
1892 */
1893struct bio_set *bioset_create(unsigned int pool_size,
1894			      unsigned int front_pad,
1895			      int flags)
1896{
1897	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1898	struct bio_set *bs;
1899
1900	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1901	if (!bs)
1902		return NULL;
1903
1904	bs->front_pad = front_pad;
1905
1906	spin_lock_init(&bs->rescue_lock);
1907	bio_list_init(&bs->rescue_list);
1908	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1909
1910	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1911	if (!bs->bio_slab) {
1912		kfree(bs);
1913		return NULL;
1914	}
1915
1916	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1917	if (!bs->bio_pool)
1918		goto bad;
1919
1920	if (flags & BIOSET_NEED_BVECS) {
1921		bs->bvec_pool = biovec_create_pool(pool_size);
1922		if (!bs->bvec_pool)
1923			goto bad;
1924	}
1925
1926	if (!(flags & BIOSET_NEED_RESCUER))
1927		return bs;
1928
1929	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1930	if (!bs->rescue_workqueue)
1931		goto bad;
1932
1933	return bs;
1934bad:
1935	bioset_free(bs);
1936	return NULL;
1937}
1938EXPORT_SYMBOL(bioset_create);
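
/*
 * Illustrative sketch, not part of this file: using @front_pad to embed
 * a bio at the END of a driver-private structure, as the description
 * above requires.  'struct example_io', 'example_bio_set' and the two
 * functions are hypothetical; the bioset/bio calls are real.
 */
struct example_io {
	void		*private_data;
	struct bio	bio;	/* must be the last member */
};

static struct bio_set *example_bio_set;

static int example_setup(void)
{
	example_bio_set = bioset_create(BIO_POOL_SIZE,
					offsetof(struct example_io, bio),
					BIOSET_NEED_BVECS);
	return example_bio_set ? 0 : -ENOMEM;
}

static struct example_io *example_alloc(unsigned short nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, example_bio_set);

	return bio ? container_of(bio, struct example_io, bio) : NULL;
}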
1939
1940#ifdef CONFIG_BLK_CGROUP
1941
1942/**
1943 * bio_associate_blkcg - associate a bio with the specified blkcg
1944 * @bio: target bio
1945 * @blkcg_css: css of the blkcg to associate
1946 *
1947 * Associate @bio with the blkcg specified by @blkcg_css.  The block layer
1948 * will treat @bio as if it were issued by a task which belongs to the blkcg.
1949 *
1950 * This function takes an extra reference on @blkcg_css, which will be put
1951 * when @bio is released.  The caller must own @bio and is responsible for
1952 * synchronizing calls to this function.
1953 */
1954int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1955{
1956	if (unlikely(bio->bi_css))
1957		return -EBUSY;
1958	css_get(blkcg_css);
1959	bio->bi_css = blkcg_css;
1960	return 0;
1961}
1962EXPORT_SYMBOL_GPL(bio_associate_blkcg);
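
/*
 * Illustrative sketch, not part of this file: writeback-style callers use
 * this to charge a bio to the cgroup that dirtied the data rather than to
 * the submitting (e.g. flusher) task.  example_attribute_bio() is
 * hypothetical; bio_associate_blkcg() is the real API.
 */
static void example_attribute_bio(struct bio *bio,
				  struct cgroup_subsys_state *blkcg_css)
{
	/* takes its own css reference; dropped when the bio is released */
	WARN_ON_ONCE(bio_associate_blkcg(bio, blkcg_css));
}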
1963
1964/**
1965 * bio_disassociate_task - undo bio_associate_current()
1966 * @bio: target bio
1967 */
1968void bio_disassociate_task(struct bio *bio)
1969{
1970	if (bio->bi_ioc) {
1971		put_io_context(bio->bi_ioc);
1972		bio->bi_ioc = NULL;
1973	}
1974	if (bio->bi_css) {
1975		css_put(bio->bi_css);
1976		bio->bi_css = NULL;
1977	}
1978}
1979
1980/**
1981 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
1982 * @dst: destination bio
1983 * @src: source bio
1984 */
1985void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
1986{
1987	if (src->bi_css)
1988		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
1989}
1990EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
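
/*
 * Illustrative sketch, not part of this file: a driver that builds a fresh
 * bio on behalf of @src can copy the association so the new bio is
 * throttled against the same cgroup.  example_bio_same_blkcg() is
 * hypothetical; the bio_* calls are real.
 */
static struct bio *example_bio_same_blkcg(struct bio *src, struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 0, bs);

	if (bio)
		bio_clone_blkcg_association(bio, src);
	return bio;
}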
1991#endif /* CONFIG_BLK_CGROUP */
1992
1993static void __init biovec_init_slabs(void)
1994{
1995	int i;
1996
1997	for (i = 0; i < BVEC_POOL_NR; i++) {
1998		int size;
1999		struct biovec_slab *bvs = bvec_slabs + i;
2000
2001		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2002			bvs->slab = NULL;
2003			continue;
2004		}
2005
2006		size = bvs->nr_vecs * sizeof(struct bio_vec);
2007		bvs->slab = kmem_cache_create(bvs->name, size, 0,
2008				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2009	}
2010}
2011
2012static int __init init_bio(void)
2013{
2014	bio_slab_max = 2;
2015	bio_slab_nr = 0;
2016	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2017	if (!bio_slabs)
2018		panic("bio: can't allocate bios\n");
2019
2020	bio_integrity_init();
2021	biovec_init_slabs();
2022
2023	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
2024	if (!fs_bio_set)
2025		panic("bio: can't allocate bios\n");
2026
2027	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2028		panic("bio: can't create integrity pool\n");
2029
2030	return 0;
2031}
2032subsys_initcall(init_bio);