   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   4 */
   5#include <linux/mm.h>
   6#include <linux/swap.h>
   7#include <linux/bio-integrity.h>
   8#include <linux/blkdev.h>
   9#include <linux/uio.h>
  10#include <linux/iocontext.h>
  11#include <linux/slab.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/export.h>
  15#include <linux/mempool.h>
  16#include <linux/workqueue.h>
  17#include <linux/cgroup.h>
  18#include <linux/highmem.h>
  19#include <linux/blk-crypto.h>
  20#include <linux/xarray.h>
  21
  22#include <trace/events/block.h>
  23#include "blk.h"
  24#include "blk-rq-qos.h"
  25#include "blk-cgroup.h"
  26
  27#define ALLOC_CACHE_THRESHOLD	16
  28#define ALLOC_CACHE_MAX		256
  29
  30struct bio_alloc_cache {
  31	struct bio		*free_list;
  32	struct bio		*free_list_irq;
  33	unsigned int		nr;
  34	unsigned int		nr_irq;
  35};
  36
  37static struct biovec_slab {
  38	int nr_vecs;
  39	char *name;
  40	struct kmem_cache *slab;
  41} bvec_slabs[] __read_mostly = {
  42	{ .nr_vecs = 16, .name = "biovec-16" },
  43	{ .nr_vecs = 64, .name = "biovec-64" },
  44	{ .nr_vecs = 128, .name = "biovec-128" },
  45	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
  46};
  47
  48static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
  49{
  50	switch (nr_vecs) {
  51	/* smaller bios use inline vecs */
  52	case 5 ... 16:
  53		return &bvec_slabs[0];
  54	case 17 ... 64:
  55		return &bvec_slabs[1];
  56	case 65 ... 128:
  57		return &bvec_slabs[2];
  58	case 129 ... BIO_MAX_VECS:
  59		return &bvec_slabs[3];
  60	default:
  61		BUG();
  62		return NULL;
  63	}
  64}
  65
  66/*
  67 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  68 * IO code that does not need private memory pools.
  69 */
  70struct bio_set fs_bio_set;
  71EXPORT_SYMBOL(fs_bio_set);
  72
  73/*
  74 * Our slab pool management
  75 */
  76struct bio_slab {
  77	struct kmem_cache *slab;
  78	unsigned int slab_ref;
  79	unsigned int slab_size;
  80	char name[8];
  81};
  82static DEFINE_MUTEX(bio_slab_lock);
  83static DEFINE_XARRAY(bio_slabs);
  84
  85static struct bio_slab *create_bio_slab(unsigned int size)
  86{
  87	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
  88
  89	if (!bslab)
  90		return NULL;
  91
  92	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
  93	bslab->slab = kmem_cache_create(bslab->name, size,
  94			ARCH_KMALLOC_MINALIGN,
  95			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
  96	if (!bslab->slab)
  97		goto fail_alloc_slab;
  98
  99	bslab->slab_ref = 1;
 100	bslab->slab_size = size;
 101
 102	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
 103		return bslab;
 104
 105	kmem_cache_destroy(bslab->slab);
 106
 107fail_alloc_slab:
 108	kfree(bslab);
 109	return NULL;
 110}
 111
 112static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
 113{
 114	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
 115}
 116
 117static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
 118{
 119	unsigned int size = bs_bio_slab_size(bs);
 120	struct bio_slab *bslab;
 121
 122	mutex_lock(&bio_slab_lock);
 123	bslab = xa_load(&bio_slabs, size);
 124	if (bslab)
 125		bslab->slab_ref++;
 126	else
 127		bslab = create_bio_slab(size);
 128	mutex_unlock(&bio_slab_lock);
 129
 130	if (bslab)
 131		return bslab->slab;
 132	return NULL;
 133}
 134
 135static void bio_put_slab(struct bio_set *bs)
 136{
 137	struct bio_slab *bslab = NULL;
 138	unsigned int slab_size = bs_bio_slab_size(bs);
 139
 140	mutex_lock(&bio_slab_lock);
 141
 142	bslab = xa_load(&bio_slabs, slab_size);
 143	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 144		goto out;
 145
 146	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
 147
 148	WARN_ON(!bslab->slab_ref);
 149
 150	if (--bslab->slab_ref)
 151		goto out;
 152
 153	xa_erase(&bio_slabs, slab_size);
 154
 155	kmem_cache_destroy(bslab->slab);
 156	kfree(bslab);
 157
 158out:
 159	mutex_unlock(&bio_slab_lock);
 160}
 161
 162void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
 163{
 164	BUG_ON(nr_vecs > BIO_MAX_VECS);
 165
 166	if (nr_vecs == BIO_MAX_VECS)
 167		mempool_free(bv, pool);
 168	else if (nr_vecs > BIO_INLINE_VECS)
 169		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
 170}
 171
 172/*
 173 * Make the first allocation restricted and don't dump info on allocation
 174 * failures, since we'll fall back to the mempool in case of failure.
 175 */
 176static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
 177{
 178	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
 179		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 180}
 181
 182struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
 183		gfp_t gfp_mask)
 184{
 185	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
 186
 187	if (WARN_ON_ONCE(!bvs))
 188		return NULL;
 189
 190	/*
 191	 * Upgrade the nr_vecs request to take full advantage of the allocation.
 192	 * We also rely on this in the bvec_free path.
 193	 */
 194	*nr_vecs = bvs->nr_vecs;
 195
 196	/*
 197	 * Try a slab allocation first for all smaller allocations.  If that
 198	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
 199	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
 200	 */
 201	if (*nr_vecs < BIO_MAX_VECS) {
 202		struct bio_vec *bvl;
 203
 204		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
 205		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 206			return bvl;
 207		*nr_vecs = BIO_MAX_VECS;
 208	}
 209
 210	return mempool_alloc(pool, gfp_mask);
 211}
 212
 213void bio_uninit(struct bio *bio)
 214{
 215#ifdef CONFIG_BLK_CGROUP
 216	if (bio->bi_blkg) {
 217		blkg_put(bio->bi_blkg);
 218		bio->bi_blkg = NULL;
 219	}
 220#endif
 221	if (bio_integrity(bio))
 222		bio_integrity_free(bio);
 223
 224	bio_crypt_free_ctx(bio);
 225}
 226EXPORT_SYMBOL(bio_uninit);
 227
 228static void bio_free(struct bio *bio)
 229{
 230	struct bio_set *bs = bio->bi_pool;
 231	void *p = bio;
 232
 233	WARN_ON_ONCE(!bs);
 234
 235	bio_uninit(bio);
 236	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
 237	mempool_free(p - bs->front_pad, &bs->bio_pool);
 238}
 239
 240/*
 241 * Users of this function have their own bio allocation. Subsequently,
 242 * they must remember to pair any call to bio_init() with bio_uninit()
 243 * when IO has completed, or when the bio is released.
 244 */
 245void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
 246	      unsigned short max_vecs, blk_opf_t opf)
 247{
 248	bio->bi_next = NULL;
 249	bio->bi_bdev = bdev;
 250	bio->bi_opf = opf;
 251	bio->bi_flags = 0;
 252	bio->bi_ioprio = 0;
 253	bio->bi_write_hint = 0;
 254	bio->bi_status = 0;
 255	bio->bi_iter.bi_sector = 0;
 256	bio->bi_iter.bi_size = 0;
 257	bio->bi_iter.bi_idx = 0;
 258	bio->bi_iter.bi_bvec_done = 0;
 259	bio->bi_end_io = NULL;
 260	bio->bi_private = NULL;
 261#ifdef CONFIG_BLK_CGROUP
 262	bio->bi_blkg = NULL;
 263	bio->bi_issue.value = 0;
 264	if (bdev)
 265		bio_associate_blkg(bio);
 266#ifdef CONFIG_BLK_CGROUP_IOCOST
 267	bio->bi_iocost_cost = 0;
 268#endif
 269#endif
 270#ifdef CONFIG_BLK_INLINE_ENCRYPTION
 271	bio->bi_crypt_context = NULL;
 272#endif
 273#ifdef CONFIG_BLK_DEV_INTEGRITY
 274	bio->bi_integrity = NULL;
 275#endif
 276	bio->bi_vcnt = 0;
 277
 278	atomic_set(&bio->__bi_remaining, 1);
 279	atomic_set(&bio->__bi_cnt, 1);
 280	bio->bi_cookie = BLK_QC_T_NONE;
 281
 282	bio->bi_max_vecs = max_vecs;
 283	bio->bi_io_vec = table;
 284	bio->bi_pool = NULL;
 285}
 286EXPORT_SYMBOL(bio_init);
 287
 288/**
 289 * bio_reset - reinitialize a bio
 290 * @bio:	bio to reset
 291 * @bdev:	block device to use the bio for
 292 * @opf:	operation and flags for bio
 293 *
 294 * Description:
 295 *   After calling bio_reset(), @bio will be in the same state as a freshly
  296 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 297 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 298 *   comment in struct bio.
 299 */
 300void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
 301{
 302	bio_uninit(bio);
 303	memset(bio, 0, BIO_RESET_BYTES);
 304	atomic_set(&bio->__bi_remaining, 1);
 305	bio->bi_bdev = bdev;
 306	if (bio->bi_bdev)
 307		bio_associate_blkg(bio);
 308	bio->bi_opf = opf;
 309}
 310EXPORT_SYMBOL(bio_reset);
 311
 312static struct bio *__bio_chain_endio(struct bio *bio)
 313{
 314	struct bio *parent = bio->bi_private;
 315
 316	if (bio->bi_status && !parent->bi_status)
 317		parent->bi_status = bio->bi_status;
 318	bio_put(bio);
 319	return parent;
 320}
 321
 322static void bio_chain_endio(struct bio *bio)
 323{
 324	bio_endio(__bio_chain_endio(bio));
 325}
 326
 327/**
 328 * bio_chain - chain bio completions
 329 * @bio: the target bio
 330 * @parent: the parent bio of @bio
 331 *
 332 * The caller won't have a bi_end_io called when @bio completes - instead,
 333 * @parent's bi_end_io won't be called until both @parent and @bio have
 334 * completed; the chained bio will also be freed when it completes.
 335 *
 336 * The caller must not set bi_private or bi_end_io in @bio.
 337 */
 338void bio_chain(struct bio *bio, struct bio *parent)
 339{
 340	BUG_ON(bio->bi_private || bio->bi_end_io);
 341
 342	bio->bi_private = parent;
 343	bio->bi_end_io	= bio_chain_endio;
 344	bio_inc_remaining(parent);
 345}
 346EXPORT_SYMBOL(bio_chain);
 347
 348/**
 349 * bio_chain_and_submit - submit a bio after chaining it to another one
 350 * @prev: bio to chain and submit
 351 * @new: bio to chain to
 352 *
 353 * If @prev is non-NULL, chain it to @new and submit it.
 354 *
 355 * Return: @new.
 356 */
 357struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
 358{
 359	if (prev) {
 360		bio_chain(prev, new);
 361		submit_bio(prev);
 362	}
 363	return new;
 364}
 365
 366struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
 367		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
 368{
 369	return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
 370}
 371EXPORT_SYMBOL_GPL(blk_next_bio);
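
/*
 * Illustrative sketch, not part of this file: building a large operation out
 * of several chained child bios, the pattern blk_next_bio() is meant for.
 * example_write_pages() and its parameters are hypothetical.
 */
static int example_write_pages(struct block_device *bdev, struct page *page,
			       unsigned int nr_bios)
{
	struct bio *bio = NULL;
	unsigned int i;
	int ret;

	for (i = 0; i < nr_bios; i++) {
		/* Chain the previous child to the new bio and submit it. */
		bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
		bio->bi_iter.bi_sector = (sector_t)i * (PAGE_SIZE >> 9);
		__bio_add_page(bio, page, PAGE_SIZE, 0);
	}

	/* Waiting on the last bio also waits for every chained child. */
	ret = bio ? submit_bio_wait(bio) : 0;
	if (bio)
		bio_put(bio);
	return ret;
}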
 372
 373static void bio_alloc_rescue(struct work_struct *work)
 374{
 375	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 376	struct bio *bio;
 377
 378	while (1) {
 379		spin_lock(&bs->rescue_lock);
 380		bio = bio_list_pop(&bs->rescue_list);
 381		spin_unlock(&bs->rescue_lock);
 382
 383		if (!bio)
 384			break;
 385
 386		submit_bio_noacct(bio);
 387	}
 388}
 389
 390static void punt_bios_to_rescuer(struct bio_set *bs)
 391{
 392	struct bio_list punt, nopunt;
 393	struct bio *bio;
 394
 395	if (WARN_ON_ONCE(!bs->rescue_workqueue))
 396		return;
 397	/*
 398	 * In order to guarantee forward progress we must punt only bios that
 399	 * were allocated from this bio_set; otherwise, if there was a bio on
 400	 * there for a stacking driver higher up in the stack, processing it
 401	 * could require allocating bios from this bio_set, and doing that from
 402	 * our own rescuer would be bad.
 403	 *
 404	 * Since bio lists are singly linked, pop them all instead of trying to
 405	 * remove from the middle of the list:
 406	 */
 407
 408	bio_list_init(&punt);
 409	bio_list_init(&nopunt);
 410
 411	while ((bio = bio_list_pop(&current->bio_list[0])))
 412		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 413	current->bio_list[0] = nopunt;
 414
 415	bio_list_init(&nopunt);
 416	while ((bio = bio_list_pop(&current->bio_list[1])))
 417		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 418	current->bio_list[1] = nopunt;
 419
 420	spin_lock(&bs->rescue_lock);
 421	bio_list_merge(&bs->rescue_list, &punt);
 422	spin_unlock(&bs->rescue_lock);
 423
 424	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 425}
 426
 427static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
 428{
 429	unsigned long flags;
 430
 431	/* cache->free_list must be empty */
 432	if (WARN_ON_ONCE(cache->free_list))
 433		return;
 434
 435	local_irq_save(flags);
 436	cache->free_list = cache->free_list_irq;
 437	cache->free_list_irq = NULL;
 438	cache->nr += cache->nr_irq;
 439	cache->nr_irq = 0;
 440	local_irq_restore(flags);
 441}
 442
 443static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
 444		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
 445		struct bio_set *bs)
 446{
 447	struct bio_alloc_cache *cache;
 448	struct bio *bio;
 449
 450	cache = per_cpu_ptr(bs->cache, get_cpu());
 451	if (!cache->free_list) {
 452		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
 453			bio_alloc_irq_cache_splice(cache);
 454		if (!cache->free_list) {
 455			put_cpu();
 456			return NULL;
 457		}
 458	}
 459	bio = cache->free_list;
 460	cache->free_list = bio->bi_next;
 461	cache->nr--;
 462	put_cpu();
 463
 464	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
 465	bio->bi_pool = bs;
 466	return bio;
 467}
 468
 469/**
 470 * bio_alloc_bioset - allocate a bio for I/O
 471 * @bdev:	block device to allocate the bio for (can be %NULL)
 472 * @nr_vecs:	number of bvecs to pre-allocate
 473 * @opf:	operation and flags for bio
 474 * @gfp_mask:   the GFP_* mask given to the slab allocator
 475 * @bs:		the bio_set to allocate from.
 476 *
 477 * Allocate a bio from the mempools in @bs.
 478 *
 479 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 480 * allocate a bio.  This is due to the mempool guarantees.  To make this work,
 481 * callers must never allocate more than 1 bio at a time from the general pool.
 482 * Callers that need to allocate more than 1 bio must always submit the
 483 * previously allocated bio for IO before attempting to allocate a new one.
 484 * Failure to do so can cause deadlocks under memory pressure.
 485 *
 486 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 487 * bios are not submitted until after you return - see the code in
 488 * submit_bio_noacct() that converts recursion into iteration, to prevent
 489 * stack overflows.
 490 *
 491 * This would normally mean allocating multiple bios under submit_bio_noacct()
 492 * would be susceptible to deadlocks, but we have
 493 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 494 * thread.
 495 *
 496 * However, we do not guarantee forward progress for allocations from other
 497 * mempools. Doing multiple allocations from the same mempool under
 498 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 499 * for per bio allocations.
 500 *
 501 * Returns: Pointer to new bio on success, NULL on failure.
 502 */
 503struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 504			     blk_opf_t opf, gfp_t gfp_mask,
 505			     struct bio_set *bs)
 506{
 507	gfp_t saved_gfp = gfp_mask;
 508	struct bio *bio;
 509	void *p;
 510
 511	/* should not use nobvec bioset for nr_vecs > 0 */
 512	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 513		return NULL;
 514
 515	if (opf & REQ_ALLOC_CACHE) {
 516		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
 517			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
 518						     gfp_mask, bs);
 519			if (bio)
 520				return bio;
 521			/*
  522			 * No cached bio available; the bio returned below is marked
  523			 * with REQ_ALLOC_CACHE to participate in the per-cpu alloc cache.
 524			 */
 525		} else {
 526			opf &= ~REQ_ALLOC_CACHE;
 527		}
 528	}
 529
 530	/*
 531	 * submit_bio_noacct() converts recursion to iteration; this means if
 532	 * we're running beneath it, any bios we allocate and submit will not be
 533	 * submitted (and thus freed) until after we return.
 534	 *
 535	 * This exposes us to a potential deadlock if we allocate multiple bios
 536	 * from the same bio_set() while running underneath submit_bio_noacct().
 537	 * If we were to allocate multiple bios (say a stacking block driver
 538	 * that was splitting bios), we would deadlock if we exhausted the
 539	 * mempool's reserve.
 540	 *
 541	 * We solve this, and guarantee forward progress, with a rescuer
 542	 * workqueue per bio_set. If we go to allocate and there are bios on
 543	 * current->bio_list, we first try the allocation without
 544	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
 545	 * blocking to the rescuer workqueue before we retry with the original
 546	 * gfp_flags.
 547	 */
 548	if (current->bio_list &&
 549	    (!bio_list_empty(&current->bio_list[0]) ||
 550	     !bio_list_empty(&current->bio_list[1])) &&
 551	    bs->rescue_workqueue)
 552		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 553
 554	p = mempool_alloc(&bs->bio_pool, gfp_mask);
 555	if (!p && gfp_mask != saved_gfp) {
 556		punt_bios_to_rescuer(bs);
 557		gfp_mask = saved_gfp;
 558		p = mempool_alloc(&bs->bio_pool, gfp_mask);
 559	}
 560	if (unlikely(!p))
 561		return NULL;
 562	if (!mempool_is_saturated(&bs->bio_pool))
 563		opf &= ~REQ_ALLOC_CACHE;
 564
 565	bio = p + bs->front_pad;
 566	if (nr_vecs > BIO_INLINE_VECS) {
 567		struct bio_vec *bvl = NULL;
 568
 569		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
 570		if (!bvl && gfp_mask != saved_gfp) {
 571			punt_bios_to_rescuer(bs);
 572			gfp_mask = saved_gfp;
 573			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
 574		}
 575		if (unlikely(!bvl))
 576			goto err_free;
 577
 578		bio_init(bio, bdev, bvl, nr_vecs, opf);
 579	} else if (nr_vecs) {
 580		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
 581	} else {
 582		bio_init(bio, bdev, NULL, 0, opf);
 583	}
 584
 585	bio->bi_pool = bs;
 586	return bio;
 587
 588err_free:
 589	mempool_free(p, &bs->bio_pool);
 590	return NULL;
 591}
 592EXPORT_SYMBOL(bio_alloc_bioset);
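
/*
 * Illustrative sketch, not part of this file: most filesystem code goes
 * through the bio_alloc() wrapper, which allocates from fs_bio_set with the
 * mempool guarantees described above.  example_read_page() is hypothetical.
 */
static void example_read_page(struct block_device *bdev, struct page *page,
			      sector_t sector, bio_end_io_t *end_io,
			      void *private)
{
	struct bio *bio;

	/* With __GFP_DIRECT_RECLAIM set this allocation cannot fail. */
	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = end_io;
	bio->bi_private = private;
	submit_bio(bio);
}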
 593
 594/**
 595 * bio_kmalloc - kmalloc a bio
 596 * @nr_vecs:	number of bio_vecs to allocate
 597 * @gfp_mask:   the GFP_* mask given to the slab allocator
 598 *
 599 * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
 600 * using bio_init() before use.  To free a bio returned from this function use
 601 * kfree() after calling bio_uninit().  A bio returned from this function can
 602 * be reused by calling bio_uninit() before calling bio_init() again.
 603 *
  604 * Note that, unlike bio_alloc() or bio_alloc_bioset(), allocations from this
  605 * function are not backed by a mempool and can fail.  Do not use this function
 606 * for allocations in the file system I/O path.
 607 *
 608 * Returns: Pointer to new bio on success, NULL on failure.
 609 */
 610struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
 611{
 612	struct bio *bio;
 613
 614	if (nr_vecs > UIO_MAXIOV)
 615		return NULL;
 616	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
 617}
 618EXPORT_SYMBOL(bio_kmalloc);
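
/*
 * Illustrative sketch, not part of this file: the bio_kmalloc() lifecycle
 * described above - bio_init() before use, bio_uninit() plus kfree() when
 * done.  example_sync_read() is a hypothetical helper.
 */
static int example_sync_read(struct block_device *bdev, struct page *page,
			     sector_t sector)
{
	struct bio *bio;
	int ret;

	bio = bio_kmalloc(1, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_uninit(bio);
	kfree(bio);
	return ret;
}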
 619
 620void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 621{
 622	struct bio_vec bv;
 623	struct bvec_iter iter;
 624
 625	__bio_for_each_segment(bv, bio, iter, start)
 626		memzero_bvec(&bv);
 627}
 628EXPORT_SYMBOL(zero_fill_bio_iter);
 629
 630/**
  631 * bio_truncate - truncate the bio to the smaller size @new_size
 632 * @bio:	the bio to be truncated
 633 * @new_size:	new size for truncating the bio
 634 *
 635 * Description:
  636 *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 637 *   REQ_OP_READ, zero the truncated part. This function should only
 638 *   be used for handling corner cases, such as bio eod.
 639 */
 640static void bio_truncate(struct bio *bio, unsigned new_size)
 641{
 642	struct bio_vec bv;
 643	struct bvec_iter iter;
 644	unsigned int done = 0;
 645	bool truncated = false;
 646
 647	if (new_size >= bio->bi_iter.bi_size)
 648		return;
 649
 650	if (bio_op(bio) != REQ_OP_READ)
 651		goto exit;
 652
 653	bio_for_each_segment(bv, bio, iter) {
 654		if (done + bv.bv_len > new_size) {
 655			unsigned offset;
 656
 657			if (!truncated)
 658				offset = new_size - done;
 659			else
 660				offset = 0;
 661			zero_user(bv.bv_page, bv.bv_offset + offset,
 662				  bv.bv_len - offset);
 663			truncated = true;
 664		}
 665		done += bv.bv_len;
 666	}
 667
 668 exit:
 669	/*
  670	 * Don't touch the bvec table here; keep it effectively immutable, since
  671	 * a fs bio user has to retrieve all pages via bio_for_each_segment_all
  672	 * in its ->bi_end_io() callback.
 673	 *
 674	 * It is enough to truncate bio by updating .bi_size since we can make
 675	 * correct bvec with the updated .bi_size for drivers.
 676	 */
 677	bio->bi_iter.bi_size = new_size;
 678}
 679
 680/**
 681 * guard_bio_eod - truncate a BIO to fit the block device
 682 * @bio:	bio to truncate
 683 *
 684 * This allows us to do IO even on the odd last sectors of a device, even if the
 685 * block size is some multiple of the physical sector size.
 686 *
 687 * We'll just truncate the bio to the size of the device, and clear the end of
 688 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 689 * I/O errors, this only handles the "we need to be able to do I/O at the final
 690 * sector" case.
 691 */
 692void guard_bio_eod(struct bio *bio)
 693{
 694	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 695
 696	if (!maxsector)
 697		return;
 698
 699	/*
 700	 * If the *whole* IO is past the end of the device,
 701	 * let it through, and the IO layer will turn it into
 702	 * an EIO.
 703	 */
 704	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
 705		return;
 706
 707	maxsector -= bio->bi_iter.bi_sector;
 708	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
 709		return;
 710
 711	bio_truncate(bio, maxsector << 9);
 712}
 713
 714static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 715				   unsigned int nr)
 716{
 717	unsigned int i = 0;
 718	struct bio *bio;
 719
 720	while ((bio = cache->free_list) != NULL) {
 721		cache->free_list = bio->bi_next;
 722		cache->nr--;
 723		bio_free(bio);
 724		if (++i == nr)
 725			break;
 726	}
 727	return i;
 728}
 729
 730static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 731				  unsigned int nr)
 732{
 733	nr -= __bio_alloc_cache_prune(cache, nr);
 734	if (!READ_ONCE(cache->free_list)) {
 735		bio_alloc_irq_cache_splice(cache);
 736		__bio_alloc_cache_prune(cache, nr);
 737	}
 738}
 739
 740static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
 741{
 742	struct bio_set *bs;
 743
 744	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
 745	if (bs->cache) {
 746		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
 747
 748		bio_alloc_cache_prune(cache, -1U);
 749	}
 750	return 0;
 751}
 752
 753static void bio_alloc_cache_destroy(struct bio_set *bs)
 754{
 755	int cpu;
 756
 757	if (!bs->cache)
 758		return;
 759
 760	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
 761	for_each_possible_cpu(cpu) {
 762		struct bio_alloc_cache *cache;
 763
 764		cache = per_cpu_ptr(bs->cache, cpu);
 765		bio_alloc_cache_prune(cache, -1U);
 766	}
 767	free_percpu(bs->cache);
 768	bs->cache = NULL;
 769}
 770
 771static inline void bio_put_percpu_cache(struct bio *bio)
 772{
 773	struct bio_alloc_cache *cache;
 774
 775	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
 776	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
 777		goto out_free;
 778
 779	if (in_task()) {
 780		bio_uninit(bio);
 781		bio->bi_next = cache->free_list;
  782		/* Not strictly necessary, but helps avoid iopolling an already freed bio */
 783		bio->bi_bdev = NULL;
 784		cache->free_list = bio;
 785		cache->nr++;
 786	} else if (in_hardirq()) {
 787		lockdep_assert_irqs_disabled();
 788
 789		bio_uninit(bio);
 790		bio->bi_next = cache->free_list_irq;
 791		cache->free_list_irq = bio;
 792		cache->nr_irq++;
 793	} else {
 794		goto out_free;
 795	}
 796	put_cpu();
 797	return;
 798out_free:
 799	put_cpu();
 800	bio_free(bio);
 801}
 802
 803/**
 804 * bio_put - release a reference to a bio
 805 * @bio:   bio to release reference to
 806 *
 807 * Description:
 808 *   Put a reference to a &struct bio, either one you have gotten with
 809 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 810 **/
 811void bio_put(struct bio *bio)
 812{
 813	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
 814		BUG_ON(!atomic_read(&bio->__bi_cnt));
 815		if (!atomic_dec_and_test(&bio->__bi_cnt))
 816			return;
 817	}
 818	if (bio->bi_opf & REQ_ALLOC_CACHE)
 819		bio_put_percpu_cache(bio);
 820	else
 821		bio_free(bio);
 822}
 823EXPORT_SYMBOL(bio_put);
 824
 825static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 826{
 827	bio_set_flag(bio, BIO_CLONED);
 828	bio->bi_ioprio = bio_src->bi_ioprio;
 829	bio->bi_write_hint = bio_src->bi_write_hint;
 830	bio->bi_iter = bio_src->bi_iter;
 831
 832	if (bio->bi_bdev) {
 833		if (bio->bi_bdev == bio_src->bi_bdev &&
 834		    bio_flagged(bio_src, BIO_REMAPPED))
 835			bio_set_flag(bio, BIO_REMAPPED);
 836		bio_clone_blkg_association(bio, bio_src);
 837	}
 838
 839	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
 840		return -ENOMEM;
 841	if (bio_integrity(bio_src) &&
 842	    bio_integrity_clone(bio, bio_src, gfp) < 0)
 843		return -ENOMEM;
 844	return 0;
 845}
 846
 847/**
 848 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 849 * @bdev: block_device to clone onto
 850 * @bio_src: bio to clone from
 851 * @gfp: allocation priority
 852 * @bs: bio_set to allocate from
 853 *
 854 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
 855 * bio, but not the actual data it points to.
 856 *
  857 * The caller must ensure that the returned bio is not freed before @bio_src.
 858 */
 859struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
 860		gfp_t gfp, struct bio_set *bs)
 861{
 862	struct bio *bio;
 863
 864	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
 865	if (!bio)
 866		return NULL;
 867
 868	if (__bio_clone(bio, bio_src, gfp) < 0) {
 869		bio_put(bio);
 870		return NULL;
 871	}
 872	bio->bi_io_vec = bio_src->bi_io_vec;
 873
 874	return bio;
 875}
 876EXPORT_SYMBOL(bio_alloc_clone);
 877
 878/**
 879 * bio_init_clone - clone a bio that shares the original bio's biovec
 880 * @bdev: block_device to clone onto
 881 * @bio: bio to clone into
 882 * @bio_src: bio to clone from
 883 * @gfp: allocation priority
 884 *
 885 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 886 * The caller owns the returned bio, but not the actual data it points to.
 887 *
 888 * The caller must ensure that @bio_src is not freed before @bio.
 889 */
 890int bio_init_clone(struct block_device *bdev, struct bio *bio,
 891		struct bio *bio_src, gfp_t gfp)
 892{
 893	int ret;
 894
 895	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
 896	ret = __bio_clone(bio, bio_src, gfp);
 897	if (ret)
 898		bio_uninit(bio);
 899	return ret;
 900}
 901EXPORT_SYMBOL(bio_init_clone);
 902
 903/**
 904 * bio_full - check if the bio is full
 905 * @bio:	bio to check
 906 * @len:	length of one segment to be added
 907 *
 908 * Return true if @bio is full and one segment with @len bytes can't be
 909 * added to the bio, otherwise return false
 910 */
 911static inline bool bio_full(struct bio *bio, unsigned len)
 912{
 913	if (bio->bi_vcnt >= bio->bi_max_vecs)
 914		return true;
 915	if (bio->bi_iter.bi_size > UINT_MAX - len)
 916		return true;
 917	return false;
 918}
 919
 920static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
 921		unsigned int len, unsigned int off, bool *same_page)
 922{
 923	size_t bv_end = bv->bv_offset + bv->bv_len;
 924	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
 925	phys_addr_t page_addr = page_to_phys(page);
 926
 927	if (vec_end_addr + 1 != page_addr + off)
 928		return false;
 929	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
 930		return false;
 931	if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
 932		return false;
 933
 934	*same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) &
 935		     PAGE_MASK));
 936	if (!*same_page) {
 937		if (IS_ENABLED(CONFIG_KMSAN))
 938			return false;
 939		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
 940			return false;
 941	}
 942
 943	bv->bv_len += len;
 944	return true;
 945}
 946
 947/*
 948 * Try to merge a page into a segment, while obeying the hardware segment
 949 * size limit.  This is not for normal read/write bios, but for passthrough
 950 * or Zone Append operations that we can't split.
 951 */
 952bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
 953		struct page *page, unsigned len, unsigned offset,
 954		bool *same_page)
 955{
 956	unsigned long mask = queue_segment_boundary(q);
 957	phys_addr_t addr1 = bvec_phys(bv);
 958	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 959
 960	if ((addr1 | mask) != (addr2 | mask))
 961		return false;
 962	if (len > queue_max_segment_size(q) - bv->bv_len)
 963		return false;
 964	return bvec_try_merge_page(bv, page, len, offset, same_page);
 965}
 966
 967/**
 968 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 969 * @q: the target queue
 970 * @bio: destination bio
 971 * @page: page to add
 972 * @len: vec entry length
 973 * @offset: vec entry offset
 974 * @max_sectors: maximum number of sectors that can be added
 975 * @same_page: return if the segment has been merged inside the same page
 976 *
 977 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 978 * and gap limitations.
 979 */
 980int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 981		struct page *page, unsigned int len, unsigned int offset,
 982		unsigned int max_sectors, bool *same_page)
 983{
 984	unsigned int max_size = max_sectors << SECTOR_SHIFT;
 985
 986	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 987		return 0;
 988
 989	len = min3(len, max_size, queue_max_segment_size(q));
 990	if (len > max_size - bio->bi_iter.bi_size)
 991		return 0;
 992
 993	if (bio->bi_vcnt > 0) {
 994		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 995
 996		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
 997				same_page)) {
 998			bio->bi_iter.bi_size += len;
 999			return len;
1000		}
1001
1002		if (bio->bi_vcnt >=
1003		    min(bio->bi_max_vecs, queue_max_segments(q)))
1004			return 0;
1005
1006		/*
1007		 * If the queue doesn't support SG gaps and adding this segment
1008		 * would create a gap, disallow it.
1009		 */
1010		if (bvec_gap_to_prev(&q->limits, bv, offset))
1011			return 0;
1012	}
1013
1014	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
1015	bio->bi_vcnt++;
1016	bio->bi_iter.bi_size += len;
1017	return len;
1018}
1019
1020/**
1021 * bio_add_hw_folio - attempt to add a folio to a bio with hw constraints
1022 * @q: the target queue
1023 * @bio: destination bio
1024 * @folio: folio to add
1025 * @len: vec entry length
1026 * @offset: vec entry offset in the folio
1027 * @max_sectors: maximum number of sectors that can be added
1028 * @same_page: return if the segment has been merged inside the same folio
1029 *
1030 * Add a folio to a bio while respecting the hardware max_sectors, max_segment
1031 * and gap limitations.
1032 */
1033int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
1034		struct folio *folio, size_t len, size_t offset,
1035		unsigned int max_sectors, bool *same_page)
1036{
1037	if (len > UINT_MAX || offset > UINT_MAX)
1038		return 0;
1039	return bio_add_hw_page(q, bio, folio_page(folio, 0), len, offset,
1040			       max_sectors, same_page);
1041}
1042
1043/**
1044 * bio_add_pc_page	- attempt to add page to passthrough bio
1045 * @q: the target queue
1046 * @bio: destination bio
1047 * @page: page to add
1048 * @len: vec entry length
1049 * @offset: vec entry offset
1050 *
1051 * Attempt to add a page to the bio_vec maplist. This can fail for a
1052 * number of reasons, such as the bio being full or target block device
 1053 * limitations. The target block device must allow bios up to PAGE_SIZE,
1054 * so it is always possible to add a single page to an empty bio.
1055 *
1056 * This should only be used by passthrough bios.
1057 */
1058int bio_add_pc_page(struct request_queue *q, struct bio *bio,
1059		struct page *page, unsigned int len, unsigned int offset)
1060{
1061	bool same_page = false;
1062	return bio_add_hw_page(q, bio, page, len, offset,
1063			queue_max_hw_sectors(q), &same_page);
1064}
1065EXPORT_SYMBOL(bio_add_pc_page);
1066
1067/**
1068 * __bio_add_page - add page(s) to a bio in a new segment
1069 * @bio: destination bio
1070 * @page: start page to add
1071 * @len: length of the data to add, may cross pages
1072 * @off: offset of the data relative to @page, may cross pages
1073 *
1074 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
1075 * that @bio has space for another bvec.
1076 */
1077void __bio_add_page(struct bio *bio, struct page *page,
1078		unsigned int len, unsigned int off)
1079{
1080	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1081	WARN_ON_ONCE(bio_full(bio, len));
1082
1083	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1084	bio->bi_iter.bi_size += len;
1085	bio->bi_vcnt++;
1086}
1087EXPORT_SYMBOL_GPL(__bio_add_page);
1088
1089/**
1090 *	bio_add_page	-	attempt to add page(s) to bio
1091 *	@bio: destination bio
1092 *	@page: start page to add
1093 *	@len: vec entry length, may cross pages
1094 *	@offset: vec entry offset relative to @page, may cross pages
1095 *
1096 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
1097 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1098 */
1099int bio_add_page(struct bio *bio, struct page *page,
1100		 unsigned int len, unsigned int offset)
1101{
1102	bool same_page = false;
1103
1104	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1105		return 0;
1106	if (bio->bi_iter.bi_size > UINT_MAX - len)
1107		return 0;
1108
1109	if (bio->bi_vcnt > 0 &&
1110	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1111				page, len, offset, &same_page)) {
1112		bio->bi_iter.bi_size += len;
1113		return len;
1114	}
1115
1116	if (bio->bi_vcnt >= bio->bi_max_vecs)
1117		return 0;
1118	__bio_add_page(bio, page, len, offset);
1119	return len;
1120}
1121EXPORT_SYMBOL(bio_add_page);
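
/*
 * Illustrative sketch, not part of this file: callers are expected to check
 * the return value, since bio_add_page() fails once the bio is full.
 * example_add_pages() is a hypothetical helper.
 */
static unsigned int example_add_pages(struct bio *bio, struct page **pages,
				      unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		/* A return of 0 means no vec slot left (or a cloned bio). */
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;
	}
	return i;	/* number of pages actually added */
}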
1122
1123void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1124			  size_t off)
1125{
1126	WARN_ON_ONCE(len > UINT_MAX);
1127	WARN_ON_ONCE(off > UINT_MAX);
1128	__bio_add_page(bio, &folio->page, len, off);
1129}
1130EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
1131
1132/**
1133 * bio_add_folio - Attempt to add part of a folio to a bio.
1134 * @bio: BIO to add to.
1135 * @folio: Folio to add.
1136 * @len: How many bytes from the folio to add.
1137 * @off: First byte in this folio to add.
1138 *
1139 * Filesystems that use folios can call this function instead of calling
1140 * bio_add_page() for each page in the folio.  If @off is bigger than
1141 * PAGE_SIZE, this function can create a bio_vec that starts in a page
1142 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
1143 *
1144 * Return: Whether the addition was successful.
1145 */
1146bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1147		   size_t off)
1148{
1149	if (len > UINT_MAX || off > UINT_MAX)
1150		return false;
1151	return bio_add_page(bio, &folio->page, len, off) > 0;
1152}
1153EXPORT_SYMBOL(bio_add_folio);
1154
1155void __bio_release_pages(struct bio *bio, bool mark_dirty)
1156{
1157	struct folio_iter fi;
1158
1159	bio_for_each_folio_all(fi, bio) {
1160		size_t nr_pages;
1161
1162		if (mark_dirty) {
1163			folio_lock(fi.folio);
1164			folio_mark_dirty(fi.folio);
1165			folio_unlock(fi.folio);
1166		}
1167		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
1168			   fi.offset / PAGE_SIZE + 1;
1169		unpin_user_folio(fi.folio, nr_pages);
1170	}
1171}
1172EXPORT_SYMBOL_GPL(__bio_release_pages);
1173
1174void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
1175{
1176	WARN_ON_ONCE(bio->bi_max_vecs);
1177
1178	bio->bi_vcnt = iter->nr_segs;
1179	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1180	bio->bi_iter.bi_bvec_done = iter->iov_offset;
1181	bio->bi_iter.bi_size = iov_iter_count(iter);
1182	bio_set_flag(bio, BIO_CLONED);
1183}
1184
1185static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
1186			     size_t offset)
1187{
1188	bool same_page = false;
1189
1190	if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
1191		return -EIO;
1192
1193	if (bio->bi_vcnt > 0 &&
1194	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1195				folio_page(folio, 0), len, offset,
1196				&same_page)) {
1197		bio->bi_iter.bi_size += len;
1198		if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
1199			unpin_user_folio(folio, 1);
1200		return 0;
1201	}
1202	bio_add_folio_nofail(bio, folio, len, offset);
1203	return 0;
1204}
1205
1206static unsigned int get_contig_folio_len(unsigned int *num_pages,
1207					 struct page **pages, unsigned int i,
1208					 struct folio *folio, size_t left,
1209					 size_t offset)
1210{
1211	size_t bytes = left;
1212	size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
1213	unsigned int j;
1214
1215	/*
1216	 * We might COW a single page in the middle of
1217	 * a large folio, so we have to check that all
1218	 * pages belong to the same folio.
1219	 */
1220	bytes -= contig_sz;
1221	for (j = i + 1; j < i + *num_pages; j++) {
1222		size_t next = min_t(size_t, PAGE_SIZE, bytes);
1223
1224		if (page_folio(pages[j]) != folio ||
1225		    pages[j] != pages[j - 1] + 1) {
1226			break;
1227		}
1228		contig_sz += next;
1229		bytes -= next;
1230	}
1231	*num_pages = j - i;
1232
1233	return contig_sz;
1234}
1235
1236#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
1237
1238/**
1239 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1240 * @bio: bio to add pages to
1241 * @iter: iov iterator describing the region to be mapped
1242 *
1243 * Extracts pages from *iter and appends them to @bio's bvec array.  The pages
1244 * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
1245 * For a multi-segment *iter, this function only adds pages from the next
1246 * non-empty segment of the iov iterator.
1247 */
1248static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1249{
1250	iov_iter_extraction_t extraction_flags = 0;
1251	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1252	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1253	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1254	struct page **pages = (struct page **)bv;
1255	ssize_t size;
1256	unsigned int num_pages, i = 0;
1257	size_t offset, folio_offset, left, len;
1258	int ret = 0;
1259
1260	/*
1261	 * Move page array up in the allocated memory for the bio vecs as far as
1262	 * possible so that we can start filling biovecs from the beginning
1263	 * without overwriting the temporary page array.
1264	 */
1265	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1266	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1267
1268	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1269		extraction_flags |= ITER_ALLOW_P2PDMA;
1270
1271	/*
1272	 * Each segment in the iov is required to be a block size multiple.
1273	 * However, we may not be able to get the entire segment if it spans
1274	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
1275	 * result to ensure the bio's total size is correct. The remainder of
1276	 * the iov data will be picked up in the next bio iteration.
1277	 */
1278	size = iov_iter_extract_pages(iter, &pages,
1279				      UINT_MAX - bio->bi_iter.bi_size,
1280				      nr_pages, extraction_flags, &offset);
1281	if (unlikely(size <= 0))
1282		return size ? size : -EFAULT;
1283
1284	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
1285
1286	if (bio->bi_bdev) {
1287		size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1288		iov_iter_revert(iter, trim);
1289		size -= trim;
1290	}
1291
1292	if (unlikely(!size)) {
1293		ret = -EFAULT;
1294		goto out;
1295	}
1296
1297	for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
1298		struct page *page = pages[i];
1299		struct folio *folio = page_folio(page);
1300
1301		folio_offset = ((size_t)folio_page_idx(folio, page) <<
1302			       PAGE_SHIFT) + offset;
1303
1304		len = min(folio_size(folio) - folio_offset, left);
1305
1306		num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1307
1308		if (num_pages > 1)
1309			len = get_contig_folio_len(&num_pages, pages, i,
1310						   folio, left, offset);
1311
1312		bio_iov_add_folio(bio, folio, len, folio_offset);
1313		offset = 0;
1314	}
1315
1316	iov_iter_revert(iter, left);
1317out:
1318	while (i < nr_pages)
1319		bio_release_page(bio, pages[i++]);
1320
1321	return ret;
1322}
1323
1324/**
1325 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1326 * @bio: bio to add pages to
1327 * @iter: iov iterator describing the region to be added
1328 *
1329 * This takes either an iterator pointing to user memory, or one pointing to
1330 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1331 * map them into the kernel. On IO completion, the caller should put those
1332 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1333 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1334 * to ensure the bvecs and pages stay referenced until the submitted I/O is
1335 * completed by a call to ->ki_complete() or returns with an error other than
1336 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1337 * on IO completion. If it isn't, then pages should be released.
1338 *
1339 * The function tries, but does not guarantee, to pin as many pages as
1340 * fit into the bio, or are requested in @iter, whatever is smaller. If
1341 * MM encounters an error pinning the requested pages, it stops. Error
1342 * is returned only if 0 pages could be pinned.
1343 */
1344int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1345{
1346	int ret = 0;
1347
1348	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1349		return -EIO;
1350
1351	if (iov_iter_is_bvec(iter)) {
1352		bio_iov_bvec_set(bio, iter);
1353		iov_iter_advance(iter, bio->bi_iter.bi_size);
1354		return 0;
1355	}
1356
1357	if (iov_iter_extract_will_pin(iter))
1358		bio_set_flag(bio, BIO_PAGE_PINNED);
1359	do {
1360		ret = __bio_iov_iter_get_pages(bio, iter);
1361	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1362
1363	return bio->bi_vcnt ? 0 : ret;
1364}
1365EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
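
/*
 * Illustrative sketch, not part of this file: the basic direct-I/O pattern of
 * mapping an iov_iter into bios until the iterator is drained, done
 * synchronously for simplicity.  example_dio_read() is a hypothetical helper.
 */
static int example_dio_read(struct block_device *bdev, struct iov_iter *iter,
			    sector_t sector)
{
	int ret = 0;

	while (iov_iter_count(iter) && !ret) {
		struct bio *bio = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_READ,
					    GFP_KERNEL);

		bio->bi_iter.bi_sector = sector;
		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret) {
			bio_put(bio);
			break;
		}
		sector += bio_sectors(bio);
		ret = submit_bio_wait(bio);
		/* Dirty the user pages the read landed in, then unpin them. */
		bio_release_pages(bio, true);
		bio_put(bio);
	}
	return ret;
}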
1366
1367static void submit_bio_wait_endio(struct bio *bio)
1368{
1369	complete(bio->bi_private);
1370}
1371
1372/**
1373 * submit_bio_wait - submit a bio, and wait until it completes
1374 * @bio: The &struct bio which describes the I/O
1375 *
1376 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1377 * bio_endio() on failure.
1378 *
 1379 * WARNING: Unlike how submit_bio() is usually used, this function does not
 1380 * consume a reference to the bio. The caller must drop the reference
 1381 * on their own.
1382 */
1383int submit_bio_wait(struct bio *bio)
1384{
1385	DECLARE_COMPLETION_ONSTACK_MAP(done,
1386			bio->bi_bdev->bd_disk->lockdep_map);
1387
1388	bio->bi_private = &done;
1389	bio->bi_end_io = submit_bio_wait_endio;
1390	bio->bi_opf |= REQ_SYNC;
1391	submit_bio(bio);
1392	blk_wait_io(&done);
1393
1394	return blk_status_to_errno(bio->bi_status);
1395}
1396EXPORT_SYMBOL(submit_bio_wait);
1397
1398static void bio_wait_end_io(struct bio *bio)
1399{
1400	complete(bio->bi_private);
1401	bio_put(bio);
1402}
1403
1404/*
1405 * bio_await_chain - ends @bio and waits for every chained bio to complete
1406 */
1407void bio_await_chain(struct bio *bio)
1408{
1409	DECLARE_COMPLETION_ONSTACK_MAP(done,
1410			bio->bi_bdev->bd_disk->lockdep_map);
1411
1412	bio->bi_private = &done;
1413	bio->bi_end_io = bio_wait_end_io;
1414	bio_endio(bio);
1415	blk_wait_io(&done);
1416}
1417
1418void __bio_advance(struct bio *bio, unsigned bytes)
1419{
1420	if (bio_integrity(bio))
1421		bio_integrity_advance(bio, bytes);
1422
1423	bio_crypt_advance(bio, bytes);
1424	bio_advance_iter(bio, &bio->bi_iter, bytes);
1425}
1426EXPORT_SYMBOL(__bio_advance);
1427
1428void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1429			struct bio *src, struct bvec_iter *src_iter)
1430{
1431	while (src_iter->bi_size && dst_iter->bi_size) {
1432		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1433		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1434		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1435		void *src_buf = bvec_kmap_local(&src_bv);
1436		void *dst_buf = bvec_kmap_local(&dst_bv);
1437
1438		memcpy(dst_buf, src_buf, bytes);
1439
1440		kunmap_local(dst_buf);
1441		kunmap_local(src_buf);
1442
1443		bio_advance_iter_single(src, src_iter, bytes);
1444		bio_advance_iter_single(dst, dst_iter, bytes);
1445	}
1446}
1447EXPORT_SYMBOL(bio_copy_data_iter);
1448
1449/**
1450 * bio_copy_data - copy contents of data buffers from one bio to another
1451 * @src: source bio
1452 * @dst: destination bio
1453 *
1454 * Stops when it reaches the end of either @src or @dst - that is, copies
1455 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1456 */
1457void bio_copy_data(struct bio *dst, struct bio *src)
1458{
1459	struct bvec_iter src_iter = src->bi_iter;
1460	struct bvec_iter dst_iter = dst->bi_iter;
1461
1462	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1463}
1464EXPORT_SYMBOL(bio_copy_data);
1465
1466void bio_free_pages(struct bio *bio)
1467{
1468	struct bio_vec *bvec;
1469	struct bvec_iter_all iter_all;
1470
1471	bio_for_each_segment_all(bvec, bio, iter_all)
1472		__free_page(bvec->bv_page);
1473}
1474EXPORT_SYMBOL(bio_free_pages);
1475
1476/*
1477 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1478 * for performing direct-IO in BIOs.
1479 *
1480 * The problem is that we cannot run folio_mark_dirty() from interrupt context
1481 * because the required locks are not interrupt-safe.  So what we can do is to
1482 * mark the pages dirty _before_ performing IO.  And in interrupt context,
1483 * check that the pages are still dirty.   If so, fine.  If not, redirty them
1484 * in process context.
1485 *
1486 * Note that this code is very hard to test under normal circumstances because
1487 * direct-io pins the pages with get_user_pages().  This makes
1488 * is_page_cache_freeable return false, and the VM will not clean the pages.
 1489 * But other code (e.g., flusher threads) could clean the pages if they are mapped
1490 * pagecache.
1491 *
1492 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1493 * deferred bio dirtying paths.
1494 */
1495
1496/*
1497 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1498 */
1499void bio_set_pages_dirty(struct bio *bio)
1500{
1501	struct folio_iter fi;
1502
1503	bio_for_each_folio_all(fi, bio) {
1504		folio_lock(fi.folio);
1505		folio_mark_dirty(fi.folio);
1506		folio_unlock(fi.folio);
1507	}
1508}
1509EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1510
1511/*
1512 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1513 * If they are, then fine.  If, however, some pages are clean then they must
1514 * have been written out during the direct-IO read.  So we take another ref on
1515 * the BIO and re-dirty the pages in process context.
1516 *
1517 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1518 * here on.  It will unpin each page and will run one bio_put() against the
1519 * BIO.
1520 */
1521
1522static void bio_dirty_fn(struct work_struct *work);
1523
1524static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1525static DEFINE_SPINLOCK(bio_dirty_lock);
1526static struct bio *bio_dirty_list;
1527
1528/*
1529 * This runs in process context
1530 */
1531static void bio_dirty_fn(struct work_struct *work)
1532{
1533	struct bio *bio, *next;
1534
1535	spin_lock_irq(&bio_dirty_lock);
1536	next = bio_dirty_list;
1537	bio_dirty_list = NULL;
1538	spin_unlock_irq(&bio_dirty_lock);
1539
1540	while ((bio = next) != NULL) {
1541		next = bio->bi_private;
1542
1543		bio_release_pages(bio, true);
1544		bio_put(bio);
1545	}
1546}
1547
1548void bio_check_pages_dirty(struct bio *bio)
1549{
1550	struct folio_iter fi;
1551	unsigned long flags;
1552
1553	bio_for_each_folio_all(fi, bio) {
1554		if (!folio_test_dirty(fi.folio))
1555			goto defer;
1556	}
1557
1558	bio_release_pages(bio, false);
1559	bio_put(bio);
1560	return;
1561defer:
1562	spin_lock_irqsave(&bio_dirty_lock, flags);
1563	bio->bi_private = bio_dirty_list;
1564	bio_dirty_list = bio;
1565	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1566	schedule_work(&bio_dirty_work);
1567}
1568EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1569
1570static inline bool bio_remaining_done(struct bio *bio)
1571{
1572	/*
1573	 * If we're not chaining, then ->__bi_remaining is always 1 and
1574	 * we always end io on the first invocation.
1575	 */
1576	if (!bio_flagged(bio, BIO_CHAIN))
1577		return true;
1578
1579	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1580
1581	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1582		bio_clear_flag(bio, BIO_CHAIN);
1583		return true;
1584	}
1585
1586	return false;
1587}
1588
1589/**
1590 * bio_endio - end I/O on a bio
1591 * @bio:	bio
1592 *
1593 * Description:
1594 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1595 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1596 *   bio unless they own it and thus know that it has an end_io function.
1597 *
1598 *   bio_endio() can be called several times on a bio that has been chained
1599 *   using bio_chain().  The ->bi_end_io() function will only be called the
1600 *   last time.
1601 **/
1602void bio_endio(struct bio *bio)
1603{
1604again:
1605	if (!bio_remaining_done(bio))
1606		return;
1607	if (!bio_integrity_endio(bio))
1608		return;
1609
1610	blk_zone_bio_endio(bio);
1611
1612	rq_qos_done_bio(bio);
1613
1614	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1615		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1616		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1617	}
1618
1619	/*
1620	 * Need to have a real endio function for chained bios, otherwise
1621	 * various corner cases will break (like stacking block devices that
1622	 * save/restore bi_end_io) - however, we want to avoid unbounded
1623	 * recursion and blowing the stack. Tail call optimization would
1624	 * handle this, but compiling with frame pointers also disables
1625	 * gcc's sibling call optimization.
1626	 */
1627	if (bio->bi_end_io == bio_chain_endio) {
1628		bio = __bio_chain_endio(bio);
1629		goto again;
1630	}
1631
1632#ifdef CONFIG_BLK_CGROUP
1633	/*
1634	 * Release cgroup info.  We shouldn't have to do this here, but quite
1635	 * a few callers of bio_init fail to call bio_uninit, so we cover up
1636	 * for that here at least for now.
1637	 */
1638	if (bio->bi_blkg) {
1639		blkg_put(bio->bi_blkg);
1640		bio->bi_blkg = NULL;
1641	}
1642#endif
1643
1644	if (bio->bi_end_io)
1645		bio->bi_end_io(bio);
1646}
1647EXPORT_SYMBOL(bio_endio);
1648
1649/**
1650 * bio_split - split a bio
1651 * @bio:	bio to split
1652 * @sectors:	number of sectors to split from the front of @bio
1653 * @gfp:	gfp mask
1654 * @bs:		bio set to allocate from
1655 *
1656 * Allocates and returns a new bio which represents @sectors from the start of
1657 * @bio, and updates @bio to represent the remaining sectors.
1658 *
1659 * Unless this is a discard request the newly allocated bio will point
1660 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1661 * neither @bio nor @bs are freed before the split bio.
1662 */
1663struct bio *bio_split(struct bio *bio, int sectors,
1664		      gfp_t gfp, struct bio_set *bs)
1665{
1666	struct bio *split;
1667
1668	if (WARN_ON_ONCE(sectors <= 0))
1669		return ERR_PTR(-EINVAL);
1670	if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
1671		return ERR_PTR(-EINVAL);
1672
1673	/* Zone append commands cannot be split */
1674	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1675		return ERR_PTR(-EINVAL);
1676
1677	/* atomic writes cannot be split */
1678	if (bio->bi_opf & REQ_ATOMIC)
1679		return ERR_PTR(-EINVAL);
1680
1681	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1682	if (!split)
1683		return ERR_PTR(-ENOMEM);
1684
1685	split->bi_iter.bi_size = sectors << 9;
1686
1687	if (bio_integrity(split))
1688		bio_integrity_trim(split);
1689
1690	bio_advance(bio, split->bi_iter.bi_size);
1691
1692	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1693		bio_set_flag(split, BIO_TRACE_COMPLETION);
1694
1695	return split;
1696}
1697EXPORT_SYMBOL(bio_split);
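
/*
 * Illustrative sketch, not part of this file: the usual stacking-driver
 * pattern of splitting off the front of a bio, chaining the remainder to it
 * and resubmitting the remainder.  example_split_front() is a hypothetical
 * helper.
 */
static struct bio *example_split_front(struct bio *bio, int sectors,
				       struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	if (IS_ERR(split))
		return split;
	/* @bio now describes the remainder and completes after @split. */
	bio_chain(split, bio);
	submit_bio_noacct(bio);
	return split;
}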
1698
1699/**
1700 * bio_trim - trim a bio
1701 * @bio:	bio to trim
1702 * @offset:	number of sectors to trim from the front of @bio
1703 * @size:	size we want to trim @bio to, in sectors
1704 *
1705 * This function is typically used for bios that are cloned and submitted
1706 * to the underlying device in parts.
1707 */
1708void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1709{
1710	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1711			 offset + size > bio_sectors(bio)))
1712		return;
1713
1714	size <<= 9;
1715	if (offset == 0 && size == bio->bi_iter.bi_size)
1716		return;
1717
1718	bio_advance(bio, offset << 9);
1719	bio->bi_iter.bi_size = size;
1720
1721	if (bio_integrity(bio))
1722		bio_integrity_trim(bio);
1723}
1724EXPORT_SYMBOL_GPL(bio_trim);
1725
1726/*
1727 * create memory pools for biovec's in a bio_set.
1728 * use the global biovec slabs created for general use.
1729 */
1730int biovec_init_pool(mempool_t *pool, int pool_entries)
1731{
1732	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1733
1734	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1735}
1736
1737/*
1738 * bioset_exit - exit a bioset initialized with bioset_init()
1739 *
1740 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1741 * kzalloc()).
1742 */
1743void bioset_exit(struct bio_set *bs)
1744{
1745	bio_alloc_cache_destroy(bs);
1746	if (bs->rescue_workqueue)
1747		destroy_workqueue(bs->rescue_workqueue);
1748	bs->rescue_workqueue = NULL;
1749
1750	mempool_exit(&bs->bio_pool);
1751	mempool_exit(&bs->bvec_pool);
1752
1753	bioset_integrity_free(bs);
1754	if (bs->bio_slab)
1755		bio_put_slab(bs);
1756	bs->bio_slab = NULL;
1757}
1758EXPORT_SYMBOL(bioset_exit);
1759
1760/**
1761 * bioset_init - Initialize a bio_set
1762 * @bs:		pool to initialize
1763 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1764 * @front_pad:	Number of bytes to allocate in front of the returned bio
1765 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1766 *              and %BIOSET_NEED_RESCUER
1767 *
1768 * Description:
1769 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1770 *    to ask for a number of bytes to be allocated in front of the bio.
1771 *    Front pad allocation is useful for embedding the bio inside
1772 *    another structure, to avoid allocating extra data to go with the bio.
1773 *    Note that the bio must be embedded at the END of that structure always,
1774 *    or things will break badly.
1775 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1776 *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
1777 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1778 *    to dispatch queued requests when the mempool runs out of space.
1779 *
1780 */
1781int bioset_init(struct bio_set *bs,
1782		unsigned int pool_size,
1783		unsigned int front_pad,
1784		int flags)
1785{
1786	bs->front_pad = front_pad;
1787	if (flags & BIOSET_NEED_BVECS)
1788		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1789	else
1790		bs->back_pad = 0;
1791
1792	spin_lock_init(&bs->rescue_lock);
1793	bio_list_init(&bs->rescue_list);
1794	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1795
1796	bs->bio_slab = bio_find_or_create_slab(bs);
1797	if (!bs->bio_slab)
1798		return -ENOMEM;
1799
1800	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1801		goto bad;
1802
1803	if ((flags & BIOSET_NEED_BVECS) &&
1804	    biovec_init_pool(&bs->bvec_pool, pool_size))
1805		goto bad;
1806
1807	if (flags & BIOSET_NEED_RESCUER) {
1808		bs->rescue_workqueue = alloc_workqueue("bioset",
1809							WQ_MEM_RECLAIM, 0);
1810		if (!bs->rescue_workqueue)
1811			goto bad;
1812	}
1813	if (flags & BIOSET_PERCPU_CACHE) {
1814		bs->cache = alloc_percpu(struct bio_alloc_cache);
1815		if (!bs->cache)
1816			goto bad;
1817		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1818	}
1819
1820	return 0;
1821bad:
1822	bioset_exit(bs);
1823	return -ENOMEM;
1824}
1825EXPORT_SYMBOL(bioset_init);
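
/*
 * Illustrative sketch, not part of this file: a driver using @front_pad to
 * embed its per-I/O state in front of the bio, as the comment above
 * describes.  struct example_io and the helpers are hypothetical.
 */
struct example_io {
	void		*driver_data;
	struct bio	bio;	/* must come last */
};

static struct bio_set example_bio_set;

static int __init example_init(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_alloc(struct block_device *bdev,
					unsigned short nr_vecs, blk_opf_t opf)
{
	struct bio *bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOIO,
					   &example_bio_set);

	if (!bio)
		return NULL;
	return container_of(bio, struct example_io, bio);
}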
1826
1827static int __init init_bio(void)
1828{
1829	int i;
1830
1831	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1832
1833	bio_integrity_init();
1834
1835	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1836		struct biovec_slab *bvs = bvec_slabs + i;
1837
1838		bvs->slab = kmem_cache_create(bvs->name,
1839				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1840				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1841	}
1842
1843	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1844					bio_cpu_dead);
1845
1846	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1847			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1848		panic("bio: can't allocate bios\n");
1849
1850	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1851		panic("bio: can't create integrity pool\n");
1852
1853	return 0;
1854}
1855subsys_initcall(init_bio);