v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1991, 1992 Linus Torvalds
   4 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   5 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   8 *	-  July 2000
   9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  10 */
  11
  12/*
  13 * This handles all read/write requests to block devices
  14 */
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/blk-pm.h>
  20#include <linux/blk-integrity.h>
  21#include <linux/highmem.h>
  22#include <linux/mm.h>
  23#include <linux/pagemap.h>
  24#include <linux/kernel_stat.h>
  25#include <linux/string.h>
  26#include <linux/init.h>
  27#include <linux/completion.h>
  28#include <linux/slab.h>
  29#include <linux/swap.h>
  30#include <linux/writeback.h>
  31#include <linux/task_io_accounting_ops.h>
  32#include <linux/fault-inject.h>
  33#include <linux/list_sort.h>
  34#include <linux/delay.h>
  35#include <linux/ratelimit.h>
  36#include <linux/pm_runtime.h>
  37#include <linux/t10-pi.h>
  38#include <linux/debugfs.h>
  39#include <linux/bpf.h>
  40#include <linux/part_stat.h>
  41#include <linux/sched/sysctl.h>
  42#include <linux/blk-crypto.h>
  43
  44#define CREATE_TRACE_POINTS
  45#include <trace/events/block.h>
  46
  47#include "blk.h"
  48#include "blk-mq-sched.h"
  49#include "blk-pm.h"
  50#include "blk-cgroup.h"
  51#include "blk-throttle.h"
  52
  53struct dentry *blk_debugfs_root;
  54
  55EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
  56EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
  57EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
  58EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
  59EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
  60EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
  61
  62static DEFINE_IDA(blk_queue_ida);
  63
  64/*
  65 * For queue allocation
  66 */
  67static struct kmem_cache *blk_requestq_cachep;
  68
  69/*
  70 * Controlling structure to kblockd
  71 */
  72static struct workqueue_struct *kblockd_workqueue;
  73
  74/**
  75 * blk_queue_flag_set - atomically set a queue flag
  76 * @flag: flag to be set
  77 * @q: request queue
  78 */
  79void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
  80{
  81	set_bit(flag, &q->queue_flags);
  82}
  83EXPORT_SYMBOL(blk_queue_flag_set);
  84
  85/**
  86 * blk_queue_flag_clear - atomically clear a queue flag
  87 * @flag: flag to be cleared
  88 * @q: request queue
  89 */
  90void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
  91{
  92	clear_bit(flag, &q->queue_flags);
  93}
  94EXPORT_SYMBOL(blk_queue_flag_clear);
  95
  96/**
  97 * blk_queue_flag_test_and_set - atomically test and set a queue flag
  98 * @flag: flag to be set
  99 * @q: request queue
 100 *
 101 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 102 * the flag was already set.
 103 */
 104bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
 105{
 106	return test_and_set_bit(flag, &q->queue_flags);
 107}
 108EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
 109
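A hedged usage sketch, not taken from this file: a driver configuring its queue might call the flag helpers above as shown below. The function name my_driver_init_queue() and the particular flags chosen are purely illustrative.

/* Illustrative sketch only (assumes <linux/blkdev.h>). */
static void my_driver_init_queue(struct request_queue *q)	/* hypothetical */
{
	/* Mark the device as non-rotational (e.g. an SSD). */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	/* This device contributes no entropy. */
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/* Do one-time setup only in the first caller that sets the flag. */
	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STATS, q))
		pr_debug("%s: stats enabled\n", __func__);
}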
 110#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
 111static const char *const blk_op_name[] = {
 112	REQ_OP_NAME(READ),
 113	REQ_OP_NAME(WRITE),
 114	REQ_OP_NAME(FLUSH),
 115	REQ_OP_NAME(DISCARD),
 116	REQ_OP_NAME(SECURE_ERASE),
 117	REQ_OP_NAME(ZONE_RESET),
 118	REQ_OP_NAME(ZONE_RESET_ALL),
 119	REQ_OP_NAME(ZONE_OPEN),
 120	REQ_OP_NAME(ZONE_CLOSE),
 121	REQ_OP_NAME(ZONE_FINISH),
 122	REQ_OP_NAME(ZONE_APPEND),
 123	REQ_OP_NAME(WRITE_ZEROES),
 124	REQ_OP_NAME(DRV_IN),
 125	REQ_OP_NAME(DRV_OUT),
 126};
 127#undef REQ_OP_NAME
 128
 129/**
 130 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 131 * @op: REQ_OP_XXX.
 132 *
 133 * Description: Centralize block layer function to convert REQ_OP_XXX into
 134 * string format. Useful in the debugging and tracing bio or request. For
 135 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 136 */
 137inline const char *blk_op_str(enum req_op op)
 138{
 139	const char *op_str = "UNKNOWN";
 140
 141	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
 142		op_str = blk_op_name[op];
 143
 144	return op_str;
 145}
 146EXPORT_SYMBOL_GPL(blk_op_str);
 147
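As a hedged sketch of how this helper is typically consumed (not code from this file), a debug print can translate a bio's opcode into its name:

/* Illustrative sketch only: printing a bio's operation name. */
static void my_debug_print_bio(struct bio *bio)		/* hypothetical */
{
	pr_debug("bio to %pg: op=%s sector=%llu size=%u\n",
		 bio->bi_bdev, blk_op_str(bio_op(bio)),
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio->bi_iter.bi_size);
}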
 148static const struct {
 149	int		errno;
 150	const char	*name;
 151} blk_errors[] = {
 152	[BLK_STS_OK]		= { 0,		"" },
 153	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
 154	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
 155	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
 156	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
 157	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
 158	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
 159	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
 160	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 161	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
 162	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
 163	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
 164	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },
 165
 166	/* device mapper special case, should not leak out: */
 167	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
 168
 169	/* zone device specific errors */
 170	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
 171	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
 172
 173	/* everything else not covered above: */
 174	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
 175};
 176
 177blk_status_t errno_to_blk_status(int errno)
 178{
 179	int i;
 180
 181	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
 182		if (blk_errors[i].errno == errno)
 183			return (__force blk_status_t)i;
 184	}
 185
 186	return BLK_STS_IOERR;
 187}
 188EXPORT_SYMBOL_GPL(errno_to_blk_status);
 189
 190int blk_status_to_errno(blk_status_t status)
 191{
 192	int idx = (__force int)status;
 193
 194	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
 195		return -EIO;
 196	return blk_errors[idx].errno;
 197}
 198EXPORT_SYMBOL_GPL(blk_status_to_errno);
 199
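A hedged sketch of the conversion boundary these two helpers cover (my_complete_rq() is hypothetical): a blk-mq driver maps its internal errno onto a blk_status_t when completing a request.

/* Illustrative sketch only. */
static void my_complete_rq(struct request *rq, int err)	/* hypothetical */
{
	/*
	 * -ENOMEM maps to BLK_STS_RESOURCE, -EAGAIN to BLK_STS_AGAIN; an
	 * errno with no entry in blk_errors[] falls back to BLK_STS_IOERR.
	 */
	blk_mq_end_request(rq, errno_to_blk_status(err));
}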
 200const char *blk_status_to_str(blk_status_t status)
 201{
 202	int idx = (__force int)status;
 203
 204	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
 205		return "<null>";
 206	return blk_errors[idx].name;
 207}
 208
 209/**
 210 * blk_sync_queue - cancel any pending callbacks on a queue
 211 * @q: the queue
 212 *
 213 * Description:
 214 *     The block layer may perform asynchronous callback activity
 215 *     on a queue, such as calling the unplug function after a timeout.
 216 *     A block device may call blk_sync_queue to ensure that any
 217 *     such activity is cancelled, thus allowing it to release resources
 218 *     that the callbacks might use. The caller must already have made sure
 219 *     that its ->submit_bio will not re-add plugging prior to calling
 220 *     this function.
 221 *
 222 *     This function does not cancel any asynchronous activity arising
 223 *     out of elevator or throttling code. That would require elevator_exit()
 224 *     and blkcg_exit_queue() to be called with queue lock initialized.
 225 *
 226 */
 227void blk_sync_queue(struct request_queue *q)
 228{
 229	del_timer_sync(&q->timeout);
 230	cancel_work_sync(&q->timeout_work);
 231}
 232EXPORT_SYMBOL(blk_sync_queue);
 233
 234/**
 235 * blk_set_pm_only - increment pm_only counter
 236 * @q: request queue pointer
 237 */
 238void blk_set_pm_only(struct request_queue *q)
 239{
 240	atomic_inc(&q->pm_only);
 241}
 242EXPORT_SYMBOL_GPL(blk_set_pm_only);
 243
 244void blk_clear_pm_only(struct request_queue *q)
 245{
 246	int pm_only;
 247
 248	pm_only = atomic_dec_return(&q->pm_only);
 249	WARN_ON_ONCE(pm_only < 0);
 250	if (pm_only == 0)
 251		wake_up_all(&q->mq_freeze_wq);
 252}
 253EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 254
 255static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 256{
 257	struct request_queue *q = container_of(rcu_head,
 258			struct request_queue, rcu_head);
 259
 260	percpu_ref_exit(&q->q_usage_counter);
 261	kmem_cache_free(blk_requestq_cachep, q);
 262}
 263
 264static void blk_free_queue(struct request_queue *q)
 265{
 266	if (q->poll_stat)
 267		blk_stat_remove_callback(q, q->poll_cb);
 268	blk_stat_free_callback(q->poll_cb);
 269
 270	blk_free_queue_stats(q->stats);
 271	kfree(q->poll_stat);
 272
 273	if (queue_is_mq(q))
 274		blk_mq_release(q);
 275
 276	ida_free(&blk_queue_ida, q->id);
 277	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 278}
 279
 280/**
 281 * blk_put_queue - decrement the request_queue refcount
 282 * @q: the request_queue structure to decrement the refcount for
 283 *
 284 * Decrements the refcount of the request_queue and free it when the refcount
 285 * reaches 0.
 286 */
 287void blk_put_queue(struct request_queue *q)
 288{
 289	if (refcount_dec_and_test(&q->refs))
 290		blk_free_queue(q);
 291}
 292EXPORT_SYMBOL(blk_put_queue);
 293
 294void blk_queue_start_drain(struct request_queue *q)
 295{
 296	/*
 297	 * When queue DYING flag is set, we need to block new req
 298	 * entering queue, so we call blk_freeze_queue_start() to
 299	 * prevent I/O from crossing blk_queue_enter().
 300	 */
 301	blk_freeze_queue_start(q);
 302	if (queue_is_mq(q))
 303		blk_mq_wake_waiters(q);
 304	/* Make blk_queue_enter() reexamine the DYING flag. */
 305	wake_up_all(&q->mq_freeze_wq);
 306}
 307
 308/**
 309 * blk_queue_enter() - try to increase q->q_usage_counter
 310 * @q: request queue pointer
 311 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 312 */
 313int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 314{
 315	const bool pm = flags & BLK_MQ_REQ_PM;
 316
 317	while (!blk_try_enter_queue(q, pm)) {
 318		if (flags & BLK_MQ_REQ_NOWAIT)
 319			return -EAGAIN;
 320
 321		/*
 322		 * read pair of barrier in blk_freeze_queue_start(), we need to
 323		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
 324		 * reading .mq_freeze_depth or queue dying flag, otherwise the
 325		 * following wait may never return if the two reads are
 326		 * reordered.
 327		 */
 328		smp_rmb();
 329		wait_event(q->mq_freeze_wq,
 330			   (!q->mq_freeze_depth &&
 331			    blk_pm_resume_queue(pm, q)) ||
 332			   blk_queue_dying(q));
 333		if (blk_queue_dying(q))
 334			return -ENODEV;
 335	}
 336
 337	return 0;
 338}
 339
 340int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 341{
 342	while (!blk_try_enter_queue(q, false)) {
 343		struct gendisk *disk = bio->bi_bdev->bd_disk;
 344
 345		if (bio->bi_opf & REQ_NOWAIT) {
 346			if (test_bit(GD_DEAD, &disk->state))
 347				goto dead;
 348			bio_wouldblock_error(bio);
 349			return -EAGAIN;
 350		}
 351
 352		/*
 353		 * read pair of barrier in blk_freeze_queue_start(), we need to
 354		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
 355		 * reading .mq_freeze_depth or queue dying flag, otherwise the
 356		 * following wait may never return if the two reads are
 357		 * reordered.
 358		 */
 359		smp_rmb();
 360		wait_event(q->mq_freeze_wq,
 361			   (!q->mq_freeze_depth &&
 362			    blk_pm_resume_queue(false, q)) ||
 363			   test_bit(GD_DEAD, &disk->state));
 364		if (test_bit(GD_DEAD, &disk->state))
 365			goto dead;
 366	}
 367
 368	return 0;
 369dead:
 370	bio_io_error(bio);
 371	return -ENODEV;
 372}
 373
 374void blk_queue_exit(struct request_queue *q)
 375{
 376	percpu_ref_put(&q->q_usage_counter);
 377}
 378
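A hedged sketch of the enter/exit bracket (my_do_queue_work() is hypothetical): code that must not race with a queue freeze or teardown pins q_usage_counter for its duration.

/* Illustrative sketch only.  Assumes the caller already holds a queue
 * reference (e.g. via blk_get_queue()).
 */
static int my_do_queue_work(struct request_queue *q)	/* hypothetical */
{
	int ret;

	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;	/* -EAGAIN while frozen, -ENODEV once dying */

	/* ... touch state that requires q_usage_counter to be held ... */

	blk_queue_exit(q);
	return 0;
}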
 379static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 380{
 381	struct request_queue *q =
 382		container_of(ref, struct request_queue, q_usage_counter);
 383
 384	wake_up_all(&q->mq_freeze_wq);
 385}
 386
 387static void blk_rq_timed_out_timer(struct timer_list *t)
 388{
 389	struct request_queue *q = from_timer(q, t, timeout);
 390
 391	kblockd_schedule_work(&q->timeout_work);
 392}
 393
 394static void blk_timeout_work(struct work_struct *work)
 395{
 396}
 397
 398struct request_queue *blk_alloc_queue(int node_id)
 399{
 400	struct request_queue *q;
 401
 402	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
 403				  node_id);
 404	if (!q)
 405		return NULL;
 406
 407	q->last_merge = NULL;
 408
 409	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
 410	if (q->id < 0)
 411		goto fail_q;
 412
 413	q->stats = blk_alloc_queue_stats();
 414	if (!q->stats)
 415		goto fail_id;
 416
 417	q->node = node_id;
 418
 419	atomic_set(&q->nr_active_requests_shared_tags, 0);
 420
 421	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
 422	INIT_WORK(&q->timeout_work, blk_timeout_work);
 423	INIT_LIST_HEAD(&q->icq_list);
 424
 425	refcount_set(&q->refs, 1);
 426	mutex_init(&q->debugfs_mutex);
 427	mutex_init(&q->sysfs_lock);
 428	mutex_init(&q->sysfs_dir_lock);
 429	spin_lock_init(&q->queue_lock);
 430
 431	init_waitqueue_head(&q->mq_freeze_wq);
 432	mutex_init(&q->mq_freeze_lock);
 433
 434	/*
 435	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 436	 * See blk_register_queue() for details.
 437	 */
 438	if (percpu_ref_init(&q->q_usage_counter,
 439				blk_queue_usage_counter_release,
 440				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 441		goto fail_stats;
 442
 443	blk_set_default_limits(&q->limits);
 444	q->nr_requests = BLKDEV_DEFAULT_RQ;
 445
 446	return q;
 447
 448fail_stats:
 449	blk_free_queue_stats(q->stats);
 450fail_id:
 451	ida_free(&blk_queue_ida, q->id);
 452fail_q:
 453	kmem_cache_free(blk_requestq_cachep, q);
 454	return NULL;
 455}
 456
 457/**
 458 * blk_get_queue - increment the request_queue refcount
 459 * @q: the request_queue structure to increment the refcount for
 460 *
 461 * Increment the refcount of the request_queue kobject.
 462 *
 463 * Context: Any context.
 464 */
 465bool blk_get_queue(struct request_queue *q)
 466{
 467	if (unlikely(blk_queue_dying(q)))
 468		return false;
 469	refcount_inc(&q->refs);
 470	return true;
 471}
 472EXPORT_SYMBOL(blk_get_queue);
 473
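A hedged sketch of the refcounting pattern around these two functions (my_grab_queue() is hypothetical): take a reference before stashing a queue pointer, and drop it later with blk_put_queue().

/* Illustrative sketch only. */
static struct request_queue *my_grab_queue(struct gendisk *disk)	/* hypothetical */
{
	struct request_queue *q = disk->queue;

	if (!blk_get_queue(q))	/* fails once the queue is dying */
		return NULL;
	return q;		/* must be balanced with blk_put_queue() */
}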
 474#ifdef CONFIG_FAIL_MAKE_REQUEST
 475
 476static DECLARE_FAULT_ATTR(fail_make_request);
 477
 478static int __init setup_fail_make_request(char *str)
 479{
 480	return setup_fault_attr(&fail_make_request, str);
 481}
 482__setup("fail_make_request=", setup_fail_make_request);
 483
 484bool should_fail_request(struct block_device *part, unsigned int bytes)
 485{
 486	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
 487}
 488
 489static int __init fail_make_request_debugfs(void)
 490{
 491	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
 492						NULL, &fail_make_request);
 493
 494	return PTR_ERR_OR_ZERO(dir);
 495}
 496
 497late_initcall(fail_make_request_debugfs);
 498#endif /* CONFIG_FAIL_MAKE_REQUEST */
 499
 500static inline void bio_check_ro(struct bio *bio)
 501{
 502	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
 503		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
 504			return;
 505		pr_warn("Trying to write to read-only block-device %pg\n",
 506			bio->bi_bdev);
 507		/* Older lvm-tools actually trigger this */
 508	}
 509}
 510
 511static noinline int should_fail_bio(struct bio *bio)
 512{
 513	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
 514		return -EIO;
 515	return 0;
 516}
 517ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
 518
 519/*
 520 * Check whether this bio extends beyond the end of the device or partition.
 521 * This may well happen - the kernel calls bread() without checking the size of
 522 * the device, e.g., when mounting a file system.
 523 */
 524static inline int bio_check_eod(struct bio *bio)
 525{
 526	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 527	unsigned int nr_sectors = bio_sectors(bio);
 528
 529	if (nr_sectors && maxsector &&
 530	    (nr_sectors > maxsector ||
 531	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
 532		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
 533				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
 534				    current->comm, bio->bi_bdev, bio->bi_opf,
 535				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
 536		return -EIO;
 537	}
 538	return 0;
 539}
 540
 541/*
 542 * Remap block n of partition p to block n+start(p) of the disk.
 543 */
 544static int blk_partition_remap(struct bio *bio)
 545{
 546	struct block_device *p = bio->bi_bdev;
 547
 548	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
 549		return -EIO;
 550	if (bio_sectors(bio)) {
 551		bio->bi_iter.bi_sector += p->bd_start_sect;
 552		trace_block_bio_remap(bio, p->bd_dev,
 553				      bio->bi_iter.bi_sector -
 554				      p->bd_start_sect);
 555	}
 556	bio_set_flag(bio, BIO_REMAPPED);
 557	return 0;
 558}
 559
 560/*
 561 * Check write append to a zoned block device.
 562 */
 563static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 564						 struct bio *bio)
 565{
 566	int nr_sectors = bio_sectors(bio);
 567
 568	/* Only applicable to zoned block devices */
 569	if (!bdev_is_zoned(bio->bi_bdev))
 570		return BLK_STS_NOTSUPP;
 571
 572	/* The bio sector must point to the start of a sequential zone */
 573	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
 574	    !bio_zone_is_seq(bio))
 575		return BLK_STS_IOERR;
 576
 577	/*
 578	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
 579	 * split and could result in non-contiguous sectors being written in
 580	 * different zones.
 581	 */
 582	if (nr_sectors > q->limits.chunk_sectors)
 583		return BLK_STS_IOERR;
 584
 585	/* Make sure the BIO is small enough and will not get split */
 586	if (nr_sectors > q->limits.max_zone_append_sectors)
 587		return BLK_STS_IOERR;
 588
 589	bio->bi_opf |= REQ_NOMERGE;
 590
 591	return BLK_STS_OK;
 592}
 593
 594static void __submit_bio(struct bio *bio)
 595{
 596	struct gendisk *disk = bio->bi_bdev->bd_disk;
 597
 598	if (unlikely(!blk_crypto_bio_prep(&bio)))
 599		return;
 600
 601	if (!disk->fops->submit_bio) {
 602		blk_mq_submit_bio(bio);
 603	} else if (likely(bio_queue_enter(bio) == 0)) {
 604		disk->fops->submit_bio(bio);
 605		blk_queue_exit(disk->queue);
 606	}
 607}
 608
 609/*
 610 * The loop in this function may be a bit non-obvious, and so deserves some
 611 * explanation:
 612 *
 613 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 614 *    that), so we have a list with a single bio.
 615 *  - We pretend that we have just taken it off a longer list, so we assign
 616 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 617 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 618 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 619 *    non-NULL value in bio_list and re-enter the loop from the top.
 620 *  - In this case we really did just take the bio of the top of the list (no
 621 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 622 *    again.
 623 *
 624 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 625 * bio_list_on_stack[1] contains bios that were submitted before the current
 626 *	->submit_bio, but that haven't been processed yet.
 627 */
 628static void __submit_bio_noacct(struct bio *bio)
 629{
 630	struct bio_list bio_list_on_stack[2];
 631
 632	BUG_ON(bio->bi_next);
 633
 634	bio_list_init(&bio_list_on_stack[0]);
 635	current->bio_list = bio_list_on_stack;
 636
 637	do {
 638		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 639		struct bio_list lower, same;
 640
 641		/*
 642		 * Create a fresh bio_list for all subordinate requests.
 643		 */
 644		bio_list_on_stack[1] = bio_list_on_stack[0];
 645		bio_list_init(&bio_list_on_stack[0]);
 646
 647		__submit_bio(bio);
 648
 649		/*
 650		 * Sort new bios into those for a lower level and those for the
 651		 * same level.
 652		 */
 653		bio_list_init(&lower);
 654		bio_list_init(&same);
 655		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 656			if (q == bdev_get_queue(bio->bi_bdev))
 657				bio_list_add(&same, bio);
 658			else
 659				bio_list_add(&lower, bio);
 660
 661		/*
 662		 * Now assemble so we handle the lowest level first.
 663		 */
 664		bio_list_merge(&bio_list_on_stack[0], &lower);
 665		bio_list_merge(&bio_list_on_stack[0], &same);
 666		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 667	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
 668
 669	current->bio_list = NULL;
 670}
 671
 672static void __submit_bio_noacct_mq(struct bio *bio)
 673{
 674	struct bio_list bio_list[2] = { };
 675
 676	current->bio_list = bio_list;
 677
 678	do {
 679		__submit_bio(bio);
 680	} while ((bio = bio_list_pop(&bio_list[0])));
 681
 682	current->bio_list = NULL;
 683}
 684
 685void submit_bio_noacct_nocheck(struct bio *bio)
 686{
 687	/*
 688	 * We only want one ->submit_bio to be active at a time, else stack
 689	 * usage with stacked devices could be a problem.  Use current->bio_list
  690	 * to collect a list of requests submitted by a ->submit_bio method while
 691	 * it is active, and then process them after it returned.
 692	 */
 693	if (current->bio_list)
 694		bio_list_add(&current->bio_list[0], bio);
 695	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
 696		__submit_bio_noacct_mq(bio);
 697	else
 698		__submit_bio_noacct(bio);
 699}
 700
 701/**
 702 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 703 * @bio:  The bio describing the location in memory and on the device.
 704 *
 705 * This is a version of submit_bio() that shall only be used for I/O that is
 706 * resubmitted to lower level drivers by stacking block drivers.  All file
 707 * systems and other upper level users of the block layer should use
 708 * submit_bio() instead.
 709 */
 710void submit_bio_noacct(struct bio *bio)
 711{
 712	struct block_device *bdev = bio->bi_bdev;
 713	struct request_queue *q = bdev_get_queue(bdev);
 714	blk_status_t status = BLK_STS_IOERR;
 715	struct blk_plug *plug;
 716
 717	might_sleep();
 718
 719	plug = blk_mq_plug(bio);
 720	if (plug && plug->nowait)
 721		bio->bi_opf |= REQ_NOWAIT;
 722
 723	/*
 724	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 725	 * if queue does not support NOWAIT.
 726	 */
 727	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
 728		goto not_supported;
 729
 730	if (should_fail_bio(bio))
 731		goto end_io;
 732	bio_check_ro(bio);
 733	if (!bio_flagged(bio, BIO_REMAPPED)) {
 734		if (unlikely(bio_check_eod(bio)))
 735			goto end_io;
 736		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
 737			goto end_io;
 738	}
 739
 740	/*
 741	 * Filter flush bio's early so that bio based drivers without flush
 742	 * support don't have to worry about them.
 743	 */
 744	if (op_is_flush(bio->bi_opf) &&
 745	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 746		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 747		if (!bio_sectors(bio)) {
 748			status = BLK_STS_OK;
 749			goto end_io;
 750		}
 751	}
 752
 753	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 754		bio_clear_polled(bio);
 755
 756	switch (bio_op(bio)) {
 757	case REQ_OP_DISCARD:
 758		if (!bdev_max_discard_sectors(bdev))
 759			goto not_supported;
 760		break;
 761	case REQ_OP_SECURE_ERASE:
 762		if (!bdev_max_secure_erase_sectors(bdev))
 763			goto not_supported;
 764		break;
 765	case REQ_OP_ZONE_APPEND:
 766		status = blk_check_zone_append(q, bio);
 767		if (status != BLK_STS_OK)
 768			goto end_io;
 769		break;
 770	case REQ_OP_ZONE_RESET:
 771	case REQ_OP_ZONE_OPEN:
 772	case REQ_OP_ZONE_CLOSE:
 773	case REQ_OP_ZONE_FINISH:
 774		if (!bdev_is_zoned(bio->bi_bdev))
 775			goto not_supported;
 776		break;
 777	case REQ_OP_ZONE_RESET_ALL:
 778		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
 779			goto not_supported;
 780		break;
 781	case REQ_OP_WRITE_ZEROES:
 782		if (!q->limits.max_write_zeroes_sectors)
 783			goto not_supported;
 784		break;
 785	default:
 786		break;
 787	}
 788
 789	if (blk_throtl_bio(bio))
 790		return;
 791
 792	blk_cgroup_bio_start(bio);
 793	blkcg_bio_issue_init(bio);
 794
 795	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 796		trace_block_bio_queue(bio);
 797		/* Now that enqueuing has been traced, we need to trace
 798		 * completion as well.
 799		 */
 800		bio_set_flag(bio, BIO_TRACE_COMPLETION);
 801	}
 802	submit_bio_noacct_nocheck(bio);
 803	return;
 804
 805not_supported:
 806	status = BLK_STS_NOTSUPP;
 807end_io:
 808	bio->bi_status = status;
 809	bio_endio(bio);
 810}
 811EXPORT_SYMBOL(submit_bio_noacct);
 812
 813/**
 814 * submit_bio - submit a bio to the block device layer for I/O
 815 * @bio: The &struct bio which describes the I/O
 816 *
 817 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 818 * fully set up &struct bio that describes the I/O that needs to be done.  The
 819 * bio will be send to the device described by the bi_bdev field.
 820 *
 821 * The success/failure status of the request, along with notification of
 822 * completion, is delivered asynchronously through the ->bi_end_io() callback
 823 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 824 * been called.
 825 */
 826void submit_bio(struct bio *bio)
 827{
 828	if (blkcg_punt_bio_submit(bio))
 829		return;
 830
 831	if (bio_op(bio) == REQ_OP_READ) {
 832		task_io_account_read(bio->bi_iter.bi_size);
 833		count_vm_events(PGPGIN, bio_sectors(bio));
 834	} else if (bio_op(bio) == REQ_OP_WRITE) {
 835		count_vm_events(PGPGOUT, bio_sectors(bio));
 836	}
 837
 838	submit_bio_noacct(bio);
 839}
 840EXPORT_SYMBOL(submit_bio);
 841
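A hedged sketch of a caller driving this interface with the v6.2 bio allocation API (my_read_page() and its completion callback are hypothetical; error handling is elided):

/* Illustrative sketch only: reading one page through submit_bio(). */
static void my_read_page(struct block_device *bdev, struct page *page,
			 sector_t sector, bio_end_io_t *done)	/* hypothetical */
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = done;	/* invoked on completion, possibly in irq context */
	submit_bio(bio);	/* ownership of the bio passes to the block layer */
}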
 842/**
 843 * bio_poll - poll for BIO completions
 844 * @bio: bio to poll for
 845 * @iob: batches of IO
 846 * @flags: BLK_POLL_* flags that control the behavior
 847 *
 848 * Poll for completions on queue associated with the bio. Returns number of
 849 * completed entries found.
 850 *
 851 * Note: the caller must either be the context that submitted @bio, or
 852 * be in a RCU critical section to prevent freeing of @bio.
 853 */
 854int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 855{
 856	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 857	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
 858	int ret = 0;
 859
 860	if (cookie == BLK_QC_T_NONE ||
 861	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 862		return 0;
 863
 864	/*
 865	 * As the requests that require a zone lock are not plugged in the
 866	 * first place, directly accessing the plug instead of using
 867	 * blk_mq_plug() should not have any consequences during flushing for
 868	 * zoned devices.
 869	 */
 870	blk_flush_plug(current->plug, false);
 871
 872	if (bio_queue_enter(bio))
 873		return 0;
 874	if (queue_is_mq(q)) {
 875		ret = blk_mq_poll(q, cookie, iob, flags);
 876	} else {
 877		struct gendisk *disk = q->disk;
 878
 879		if (disk && disk->fops->poll_bio)
 880			ret = disk->fops->poll_bio(bio, iob, flags);
 881	}
 882	blk_queue_exit(q);
 883	return ret;
 884}
 885EXPORT_SYMBOL_GPL(bio_poll);
 886
 887/*
 888 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 889 * in iocb->private, and cleared before freeing the bio.
 890 */
 891int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 892		    unsigned int flags)
 893{
 894	struct bio *bio;
 895	int ret = 0;
 896
 897	/*
 898	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
 899	 * point to a freshly allocated bio at this point.  If that happens
 900	 * we have a few cases to consider:
 901	 *
  902	 *  1) the bio is being initialized and bi_bdev is NULL.  We can
  903	 *     simply do nothing in this case
 904	 *  2) the bio points to a not poll enabled device.  bio_poll will catch
 905	 *     this and return 0
 906	 *  3) the bio points to a poll capable device, including but not
 907	 *     limited to the one that the original bio pointed to.  In this
 908	 *     case we will call into the actual poll method and poll for I/O,
 909	 *     even if we don't need to, but it won't cause harm either.
 910	 *
 911	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
 912	 * is still allocated. Because partitions hold a reference to the whole
 913	 * device bdev and thus disk, the disk is also still valid.  Grabbing
 914	 * a reference to the queue in bio_poll() ensures the hctxs and requests
 915	 * are still valid as well.
 916	 */
 917	rcu_read_lock();
 918	bio = READ_ONCE(kiocb->private);
 919	if (bio && bio->bi_bdev)
 920		ret = bio_poll(bio, iob, flags);
 921	rcu_read_unlock();
 922
 923	return ret;
 924}
 925EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
 926
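A hedged sketch of how this helper is wired up (all my_* names are hypothetical): the driver's file_operations points .iopoll at it, and the submission path must store the in-flight bio in kiocb->private and clear it before the bio is freed, as the comment above requires.

/* Illustrative sketch only. */
static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to);	/* hypothetical */
static ssize_t my_write_iter(struct kiocb *iocb, struct iov_iter *from);/* hypothetical */

static const struct file_operations my_fops = {			/* hypothetical */
	.owner		= THIS_MODULE,
	.read_iter	= my_read_iter,
	.write_iter	= my_write_iter,
	.iopoll		= iocb_bio_iopoll,
};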
 927void update_io_ticks(struct block_device *part, unsigned long now, bool end)
 928{
 929	unsigned long stamp;
 930again:
 931	stamp = READ_ONCE(part->bd_stamp);
 932	if (unlikely(time_after(now, stamp))) {
 933		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
 934			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
 935	}
 936	if (part->bd_partno) {
 937		part = bdev_whole(part);
 938		goto again;
 939	}
 940}
 941
 942unsigned long bdev_start_io_acct(struct block_device *bdev,
 943				 unsigned int sectors, enum req_op op,
 944				 unsigned long start_time)
 945{
 946	const int sgrp = op_stat_group(op);
 947
 948	part_stat_lock();
 949	update_io_ticks(bdev, start_time, false);
 950	part_stat_inc(bdev, ios[sgrp]);
 951	part_stat_add(bdev, sectors[sgrp], sectors);
 952	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
 953	part_stat_unlock();
 954
 955	return start_time;
 956}
 957EXPORT_SYMBOL(bdev_start_io_acct);
 958
 959/**
 960 * bio_start_io_acct - start I/O accounting for bio based drivers
 961 * @bio:	bio to start account for
 962 *
 963 * Returns the start time that should be passed back to bio_end_io_acct().
 964 */
 965unsigned long bio_start_io_acct(struct bio *bio)
 966{
 967	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
 968				  bio_op(bio), jiffies);
 969}
 970EXPORT_SYMBOL_GPL(bio_start_io_acct);
 971
 972void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
 973		      unsigned long start_time)
 974{
 975	const int sgrp = op_stat_group(op);
 976	unsigned long now = READ_ONCE(jiffies);
 977	unsigned long duration = now - start_time;
 978
 979	part_stat_lock();
 980	update_io_ticks(bdev, now, true);
 981	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
 982	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
 983	part_stat_unlock();
 984}
 985EXPORT_SYMBOL(bdev_end_io_acct);
 986
 987void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
 988			      struct block_device *orig_bdev)
 989{
 990	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
 991}
 992EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
 993
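A hedged sketch of disk-statistics accounting in a bio based driver, using the v6.2 helper signatures shown above (my_handle_bio() is hypothetical):

/* Illustrative sketch only. */
static void my_handle_bio(struct bio *bio)	/* hypothetical ->submit_bio */
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... perform the I/O, possibly asynchronously ... */

	/* On completion (same bdev, bio not remapped): */
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start);
	bio_endio(bio);
}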
 994/**
 995 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 996 * @q : the queue of the device being checked
 997 *
 998 * Description:
 999 *    Check if underlying low-level drivers of a device are busy.
1000 *    If the drivers want to export their busy state, they must set own
1001 *    exporting function using blk_queue_lld_busy() first.
1002 *
1003 *    Basically, this function is used only by request stacking drivers
1004 *    to stop dispatching requests to underlying devices when underlying
1005 *    devices are busy.  This behavior helps more I/O merging on the queue
1006 *    of the request stacking driver and prevents I/O throughput regression
1007 *    on burst I/O load.
1008 *
1009 * Return:
1010 *    0 - Not busy (The request stacking driver should dispatch request)
1011 *    1 - Busy (The request stacking driver should stop dispatching request)
1012 */
1013int blk_lld_busy(struct request_queue *q)
1014{
1015	if (queue_is_mq(q) && q->mq_ops->busy)
1016		return q->mq_ops->busy(q);
1017
1018	return 0;
1019}
1020EXPORT_SYMBOL_GPL(blk_lld_busy);
1021
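A hedged sketch of the intended caller (my_lower_dev_busy() is hypothetical): a request stacking driver checks the underlying device before dispatching to it.

/* Illustrative sketch only. */
static bool my_lower_dev_busy(struct block_device *lower_bdev)	/* hypothetical */
{
	return blk_lld_busy(bdev_get_queue(lower_bdev)) != 0;
}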
1022int kblockd_schedule_work(struct work_struct *work)
1023{
1024	return queue_work(kblockd_workqueue, work);
1025}
1026EXPORT_SYMBOL(kblockd_schedule_work);
1027
1028int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1029				unsigned long delay)
1030{
1031	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1032}
1033EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1034
1035void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1036{
1037	struct task_struct *tsk = current;
1038
1039	/*
1040	 * If this is a nested plug, don't actually assign it.
1041	 */
1042	if (tsk->plug)
1043		return;
1044
1045	plug->mq_list = NULL;
1046	plug->cached_rq = NULL;
1047	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1048	plug->rq_count = 0;
1049	plug->multiple_queues = false;
1050	plug->has_elevator = false;
1051	plug->nowait = false;
1052	INIT_LIST_HEAD(&plug->cb_list);
1053
1054	/*
1055	 * Store ordering should not be needed here, since a potential
1056	 * preempt will imply a full memory barrier
1057	 */
1058	tsk->plug = plug;
1059}
1060
1061/**
1062 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1063 * @plug:	The &struct blk_plug that needs to be initialized
1064 *
1065 * Description:
1066 *   blk_start_plug() indicates to the block layer an intent by the caller
1067 *   to submit multiple I/O requests in a batch.  The block layer may use
1068 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
1069 *   is called.  However, the block layer may choose to submit requests
1070 *   before a call to blk_finish_plug() if the number of queued I/Os
1071 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1072 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
1073 *   the task schedules (see below).
1074 *
1075 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
1076 *   pending I/O should the task end up blocking between blk_start_plug() and
1077 *   blk_finish_plug(). This is important from a performance perspective, but
1078 *   also ensures that we don't deadlock. For instance, if the task is blocking
1079 *   for a memory allocation, memory reclaim could end up wanting to free a
1080 *   page belonging to that request that is currently residing in our private
1081 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
1082 *   this kind of deadlock.
1083 */
1084void blk_start_plug(struct blk_plug *plug)
1085{
1086	blk_start_plug_nr_ios(plug, 1);
1087}
1088EXPORT_SYMBOL(blk_start_plug);
1089
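A hedged sketch of the plugging pattern described above (my_submit_many() is hypothetical): bracket a batch of submissions so the block layer can merge and dispatch them together.

/* Illustrative sketch only. */
static void my_submit_many(struct bio **bios, int nr)	/* hypothetical */
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* flushes anything still held in the plug */
}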
1090static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1091{
1092	LIST_HEAD(callbacks);
1093
1094	while (!list_empty(&plug->cb_list)) {
1095		list_splice_init(&plug->cb_list, &callbacks);
1096
1097		while (!list_empty(&callbacks)) {
1098			struct blk_plug_cb *cb = list_first_entry(&callbacks,
1099							  struct blk_plug_cb,
1100							  list);
1101			list_del(&cb->list);
1102			cb->callback(cb, from_schedule);
1103		}
1104	}
1105}
1106
1107struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1108				      int size)
1109{
1110	struct blk_plug *plug = current->plug;
1111	struct blk_plug_cb *cb;
1112
1113	if (!plug)
1114		return NULL;
1115
1116	list_for_each_entry(cb, &plug->cb_list, list)
1117		if (cb->callback == unplug && cb->data == data)
1118			return cb;
1119
1120	/* Not currently on the callback list */
1121	BUG_ON(size < sizeof(*cb));
1122	cb = kzalloc(size, GFP_ATOMIC);
1123	if (cb) {
1124		cb->data = data;
1125		cb->callback = unplug;
1126		list_add(&cb->list, &plug->cb_list);
1127	}
1128	return cb;
1129}
1130EXPORT_SYMBOL(blk_check_plugged);
1131
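A hedged sketch modelled on the md/raid usage pattern (all my_* names are hypothetical): embed a blk_plug_cb in per-plug private state, queue bios on it while a plug is active, and drain them when the plug is flushed.

/* Illustrative sketch only. */
struct my_plug_cb {
	struct blk_plug_cb	cb;		/* registered with the plug */
	struct bio_list		pending;	/* empty: blk_check_plugged() zeroes new allocations */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&mcb->pending)) != NULL)
		submit_bio_noacct(bio);
	kfree(mcb);	/* the callback owns the allocation made by blk_check_plugged() */
}

static bool my_defer_bio(void *data, struct bio *bio)
{
	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, data,
						   sizeof(struct my_plug_cb));

	if (!cb)
		return false;	/* no plug active; caller submits directly */
	bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending, bio);
	return true;
}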
1132void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1133{
1134	if (!list_empty(&plug->cb_list))
1135		flush_plug_callbacks(plug, from_schedule);
1136	if (!rq_list_empty(plug->mq_list))
1137		blk_mq_flush_plug_list(plug, from_schedule);
1138	/*
1139	 * Unconditionally flush out cached requests, even if the unplug
 1140	 * event came from schedule. Since we hold references to the
1141	 * queue for cached requests, we don't want a blocked task holding
1142	 * up a queue freeze/quiesce event.
1143	 */
1144	if (unlikely(!rq_list_empty(plug->cached_rq)))
1145		blk_mq_free_plug_rqs(plug);
1146}
1147
1148/**
1149 * blk_finish_plug - mark the end of a batch of submitted I/O
1150 * @plug:	The &struct blk_plug passed to blk_start_plug()
1151 *
1152 * Description:
1153 * Indicate that a batch of I/O submissions is complete.  This function
1154 * must be paired with an initial call to blk_start_plug().  The intent
1155 * is to allow the block layer to optimize I/O submission.  See the
1156 * documentation for blk_start_plug() for more information.
1157 */
1158void blk_finish_plug(struct blk_plug *plug)
1159{
1160	if (plug == current->plug) {
1161		__blk_flush_plug(plug, false);
1162		current->plug = NULL;
1163	}
1164}
1165EXPORT_SYMBOL(blk_finish_plug);
1166
1167void blk_io_schedule(void)
1168{
1169	/* Prevent hang_check timer from firing at us during very long I/O */
1170	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1171
1172	if (timeout)
1173		io_schedule_timeout(timeout);
1174	else
1175		io_schedule();
1176}
1177EXPORT_SYMBOL_GPL(blk_io_schedule);
1178
1179int __init blk_dev_init(void)
1180{
1181	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
1182	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1183			sizeof_field(struct request, cmd_flags));
1184	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1185			sizeof_field(struct bio, bi_opf));
1186
1187	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
1188	kblockd_workqueue = alloc_workqueue("kblockd",
1189					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1190	if (!kblockd_workqueue)
1191		panic("Failed to create kblockd\n");
1192
1193	blk_requestq_cachep = kmem_cache_create("request_queue",
1194			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1195
1196	blk_debugfs_root = debugfs_create_dir("block", NULL);
1197
1198	return 0;
1199}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1991, 1992 Linus Torvalds
   4 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   5 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   8 *	-  July 2000
   9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  10 */
  11
  12/*
  13 * This handles all read/write requests to block devices
  14 */
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/blk-pm.h>
  20#include <linux/blk-integrity.h>
  21#include <linux/highmem.h>
  22#include <linux/mm.h>
  23#include <linux/pagemap.h>
  24#include <linux/kernel_stat.h>
  25#include <linux/string.h>
  26#include <linux/init.h>
  27#include <linux/completion.h>
  28#include <linux/slab.h>
  29#include <linux/swap.h>
  30#include <linux/writeback.h>
  31#include <linux/task_io_accounting_ops.h>
  32#include <linux/fault-inject.h>
  33#include <linux/list_sort.h>
  34#include <linux/delay.h>
  35#include <linux/ratelimit.h>
  36#include <linux/pm_runtime.h>
  37#include <linux/t10-pi.h>
  38#include <linux/debugfs.h>
  39#include <linux/bpf.h>
  40#include <linux/part_stat.h>
  41#include <linux/sched/sysctl.h>
  42#include <linux/blk-crypto.h>
  43
  44#define CREATE_TRACE_POINTS
  45#include <trace/events/block.h>
  46
  47#include "blk.h"
  48#include "blk-mq-sched.h"
  49#include "blk-pm.h"
  50#include "blk-cgroup.h"
  51#include "blk-throttle.h"
  52#include "blk-ioprio.h"
  53
  54struct dentry *blk_debugfs_root;
  55
  56EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
  57EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
  58EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
  59EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
  60EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
  61EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
  62
  63static DEFINE_IDA(blk_queue_ida);
  64
  65/*
  66 * For queue allocation
  67 */
  68static struct kmem_cache *blk_requestq_cachep;
  69
  70/*
  71 * Controlling structure to kblockd
  72 */
  73static struct workqueue_struct *kblockd_workqueue;
  74
  75/**
  76 * blk_queue_flag_set - atomically set a queue flag
  77 * @flag: flag to be set
  78 * @q: request queue
  79 */
  80void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
  81{
  82	set_bit(flag, &q->queue_flags);
  83}
  84EXPORT_SYMBOL(blk_queue_flag_set);
  85
  86/**
  87 * blk_queue_flag_clear - atomically clear a queue flag
  88 * @flag: flag to be cleared
  89 * @q: request queue
  90 */
  91void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
  92{
  93	clear_bit(flag, &q->queue_flags);
  94}
  95EXPORT_SYMBOL(blk_queue_flag_clear);
  96
  97#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
  98static const char *const blk_op_name[] = {
  99	REQ_OP_NAME(READ),
 100	REQ_OP_NAME(WRITE),
 101	REQ_OP_NAME(FLUSH),
 102	REQ_OP_NAME(DISCARD),
 103	REQ_OP_NAME(SECURE_ERASE),
 104	REQ_OP_NAME(ZONE_RESET),
 105	REQ_OP_NAME(ZONE_RESET_ALL),
 106	REQ_OP_NAME(ZONE_OPEN),
 107	REQ_OP_NAME(ZONE_CLOSE),
 108	REQ_OP_NAME(ZONE_FINISH),
 109	REQ_OP_NAME(ZONE_APPEND),
 110	REQ_OP_NAME(WRITE_ZEROES),
 111	REQ_OP_NAME(DRV_IN),
 112	REQ_OP_NAME(DRV_OUT),
 113};
 114#undef REQ_OP_NAME
 115
 116/**
 117 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 118 * @op: REQ_OP_XXX.
 119 *
 120 * Description: Centralize block layer function to convert REQ_OP_XXX into
 121 * string format. Useful in the debugging and tracing bio or request. For
 122 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 123 */
 124inline const char *blk_op_str(enum req_op op)
 125{
 126	const char *op_str = "UNKNOWN";
 127
 128	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
 129		op_str = blk_op_name[op];
 130
 131	return op_str;
 132}
 133EXPORT_SYMBOL_GPL(blk_op_str);
 134
 135static const struct {
 136	int		errno;
 137	const char	*name;
 138} blk_errors[] = {
 139	[BLK_STS_OK]		= { 0,		"" },
 140	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
 141	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
 142	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
 143	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
 144	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
 145	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
 146	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
 147	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 148	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
 149	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
 150	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
 151	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },
 152
 153	/* device mapper special case, should not leak out: */
 154	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
 155
 156	/* zone device specific errors */
 157	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
 158	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
 159
 160	/* Command duration limit device-side timeout */
 161	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },
 162
 163	[BLK_STS_INVAL]		= { -EINVAL,	"invalid" },
 164
 165	/* everything else not covered above: */
 166	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
 167};
 168
 169blk_status_t errno_to_blk_status(int errno)
 170{
 171	int i;
 172
 173	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
 174		if (blk_errors[i].errno == errno)
 175			return (__force blk_status_t)i;
 176	}
 177
 178	return BLK_STS_IOERR;
 179}
 180EXPORT_SYMBOL_GPL(errno_to_blk_status);
 181
 182int blk_status_to_errno(blk_status_t status)
 183{
 184	int idx = (__force int)status;
 185
 186	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
 187		return -EIO;
 188	return blk_errors[idx].errno;
 189}
 190EXPORT_SYMBOL_GPL(blk_status_to_errno);
 191
 192const char *blk_status_to_str(blk_status_t status)
 193{
 194	int idx = (__force int)status;
 195
 196	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
 197		return "<null>";
 198	return blk_errors[idx].name;
 199}
 200EXPORT_SYMBOL_GPL(blk_status_to_str);
 201
 202/**
 203 * blk_sync_queue - cancel any pending callbacks on a queue
 204 * @q: the queue
 205 *
 206 * Description:
 207 *     The block layer may perform asynchronous callback activity
 208 *     on a queue, such as calling the unplug function after a timeout.
 209 *     A block device may call blk_sync_queue to ensure that any
 210 *     such activity is cancelled, thus allowing it to release resources
 211 *     that the callbacks might use. The caller must already have made sure
 212 *     that its ->submit_bio will not re-add plugging prior to calling
 213 *     this function.
 214 *
 215 *     This function does not cancel any asynchronous activity arising
 216 *     out of elevator or throttling code. That would require elevator_exit()
 217 *     and blkcg_exit_queue() to be called with queue lock initialized.
 218 *
 219 */
 220void blk_sync_queue(struct request_queue *q)
 221{
 222	del_timer_sync(&q->timeout);
 223	cancel_work_sync(&q->timeout_work);
 224}
 225EXPORT_SYMBOL(blk_sync_queue);
 226
 227/**
 228 * blk_set_pm_only - increment pm_only counter
 229 * @q: request queue pointer
 230 */
 231void blk_set_pm_only(struct request_queue *q)
 232{
 233	atomic_inc(&q->pm_only);
 234}
 235EXPORT_SYMBOL_GPL(blk_set_pm_only);
 236
 237void blk_clear_pm_only(struct request_queue *q)
 238{
 239	int pm_only;
 240
 241	pm_only = atomic_dec_return(&q->pm_only);
 242	WARN_ON_ONCE(pm_only < 0);
 243	if (pm_only == 0)
 244		wake_up_all(&q->mq_freeze_wq);
 245}
 246EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 247
 248static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 249{
 250	struct request_queue *q = container_of(rcu_head,
 251			struct request_queue, rcu_head);
 252
 253	percpu_ref_exit(&q->q_usage_counter);
 254	kmem_cache_free(blk_requestq_cachep, q);
 255}
 256
 257static void blk_free_queue(struct request_queue *q)
 258{
 259	blk_free_queue_stats(q->stats);
 260	if (queue_is_mq(q))
 261		blk_mq_release(q);
 262
 263	ida_free(&blk_queue_ida, q->id);
 264	lockdep_unregister_key(&q->io_lock_cls_key);
 265	lockdep_unregister_key(&q->q_lock_cls_key);
 266	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 267}
 268
 269/**
 270 * blk_put_queue - decrement the request_queue refcount
 271 * @q: the request_queue structure to decrement the refcount for
 272 *
 273 * Decrements the refcount of the request_queue and free it when the refcount
 274 * reaches 0.
 275 */
 276void blk_put_queue(struct request_queue *q)
 277{
 278	if (refcount_dec_and_test(&q->refs))
 279		blk_free_queue(q);
 280}
 281EXPORT_SYMBOL(blk_put_queue);
 282
 283bool blk_queue_start_drain(struct request_queue *q)
 284{
 285	/*
 286	 * When queue DYING flag is set, we need to block new req
 287	 * entering queue, so we call blk_freeze_queue_start() to
 288	 * prevent I/O from crossing blk_queue_enter().
 289	 */
 290	bool freeze = __blk_freeze_queue_start(q, current);
 291	if (queue_is_mq(q))
 292		blk_mq_wake_waiters(q);
 293	/* Make blk_queue_enter() reexamine the DYING flag. */
 294	wake_up_all(&q->mq_freeze_wq);
 295
 296	return freeze;
 297}
 298
 299/**
 300 * blk_queue_enter() - try to increase q->q_usage_counter
 301 * @q: request queue pointer
 302 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 303 */
 304int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 305{
 306	const bool pm = flags & BLK_MQ_REQ_PM;
 307
 308	while (!blk_try_enter_queue(q, pm)) {
 309		if (flags & BLK_MQ_REQ_NOWAIT)
 310			return -EAGAIN;
 311
 312		/*
 313		 * read pair of barrier in blk_freeze_queue_start(), we need to
 314		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
 315		 * reading .mq_freeze_depth or queue dying flag, otherwise the
 316		 * following wait may never return if the two reads are
 317		 * reordered.
 318		 */
 319		smp_rmb();
 320		wait_event(q->mq_freeze_wq,
 321			   (!q->mq_freeze_depth &&
 322			    blk_pm_resume_queue(pm, q)) ||
 323			   blk_queue_dying(q));
 324		if (blk_queue_dying(q))
 325			return -ENODEV;
 326	}
 327
 328	rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
 329	rwsem_release(&q->q_lockdep_map, _RET_IP_);
 330	return 0;
 331}
 332
 333int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 334{
 335	while (!blk_try_enter_queue(q, false)) {
 336		struct gendisk *disk = bio->bi_bdev->bd_disk;
 337
 338		if (bio->bi_opf & REQ_NOWAIT) {
 339			if (test_bit(GD_DEAD, &disk->state))
 340				goto dead;
 341			bio_wouldblock_error(bio);
 342			return -EAGAIN;
 343		}
 344
 345		/*
 346		 * read pair of barrier in blk_freeze_queue_start(), we need to
 347		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
 348		 * reading .mq_freeze_depth or queue dying flag, otherwise the
 349		 * following wait may never return if the two reads are
 350		 * reordered.
 351		 */
 352		smp_rmb();
 353		wait_event(q->mq_freeze_wq,
 354			   (!q->mq_freeze_depth &&
 355			    blk_pm_resume_queue(false, q)) ||
 356			   test_bit(GD_DEAD, &disk->state));
 357		if (test_bit(GD_DEAD, &disk->state))
 358			goto dead;
 359	}
 360
 361	rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
 362	rwsem_release(&q->io_lockdep_map, _RET_IP_);
 363	return 0;
 364dead:
 365	bio_io_error(bio);
 366	return -ENODEV;
 367}
 368
 369void blk_queue_exit(struct request_queue *q)
 370{
 371	percpu_ref_put(&q->q_usage_counter);
 372}
 373
 374static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 375{
 376	struct request_queue *q =
 377		container_of(ref, struct request_queue, q_usage_counter);
 378
 379	wake_up_all(&q->mq_freeze_wq);
 380}
 381
 382static void blk_rq_timed_out_timer(struct timer_list *t)
 383{
 384	struct request_queue *q = from_timer(q, t, timeout);
 385
 386	kblockd_schedule_work(&q->timeout_work);
 387}
 388
 389static void blk_timeout_work(struct work_struct *work)
 390{
 391}
 392
 393struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 394{
 395	struct request_queue *q;
 396	int error;
 397
 398	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
 399				  node_id);
 400	if (!q)
 401		return ERR_PTR(-ENOMEM);
 402
 403	q->last_merge = NULL;
 404
 405	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
 406	if (q->id < 0) {
 407		error = q->id;
 408		goto fail_q;
 409	}
 410
 411	q->stats = blk_alloc_queue_stats();
 412	if (!q->stats) {
 413		error = -ENOMEM;
 414		goto fail_id;
 415	}
 416
 417	error = blk_set_default_limits(lim);
 418	if (error)
 419		goto fail_stats;
 420	q->limits = *lim;
 421
 422	q->node = node_id;
 423
 424	atomic_set(&q->nr_active_requests_shared_tags, 0);
 425
 426	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
 427	INIT_WORK(&q->timeout_work, blk_timeout_work);
 428	INIT_LIST_HEAD(&q->icq_list);
 429
 430	refcount_set(&q->refs, 1);
 431	mutex_init(&q->debugfs_mutex);
 432	mutex_init(&q->sysfs_lock);
 433	mutex_init(&q->sysfs_dir_lock);
 434	mutex_init(&q->limits_lock);
 435	mutex_init(&q->rq_qos_mutex);
 436	spin_lock_init(&q->queue_lock);
 437
 438	init_waitqueue_head(&q->mq_freeze_wq);
 439	mutex_init(&q->mq_freeze_lock);
 440
 441	blkg_init_queue(q);
 442
 443	/*
 444	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 445	 * See blk_register_queue() for details.
 446	 */
 447	error = percpu_ref_init(&q->q_usage_counter,
 448				blk_queue_usage_counter_release,
 449				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 450	if (error)
 451		goto fail_stats;
 452	lockdep_register_key(&q->io_lock_cls_key);
 453	lockdep_register_key(&q->q_lock_cls_key);
 454	lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
 455			 &q->io_lock_cls_key, 0);
 456	lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
 457			 &q->q_lock_cls_key, 0);
 458
  458
 459	q->nr_requests = BLKDEV_DEFAULT_RQ;
 460
 461	return q;
 462
 463fail_stats:
 464	blk_free_queue_stats(q->stats);
 465fail_id:
 466	ida_free(&blk_queue_ida, q->id);
 467fail_q:
 468	kmem_cache_free(blk_requestq_cachep, q);
 469	return ERR_PTR(error);
 470}
 471
 472/**
 473 * blk_get_queue - increment the request_queue refcount
 474 * @q: the request_queue structure to increment the refcount for
 475 *
 476 * Increment the refcount of the request_queue kobject.
 477 *
 478 * Context: Any context.
 479 */
 480bool blk_get_queue(struct request_queue *q)
 481{
 482	if (unlikely(blk_queue_dying(q)))
 483		return false;
 484	refcount_inc(&q->refs);
 485	return true;
 486}
 487EXPORT_SYMBOL(blk_get_queue);
 488
 489#ifdef CONFIG_FAIL_MAKE_REQUEST
 490
 491static DECLARE_FAULT_ATTR(fail_make_request);
 492
 493static int __init setup_fail_make_request(char *str)
 494{
 495	return setup_fault_attr(&fail_make_request, str);
 496}
 497__setup("fail_make_request=", setup_fail_make_request);
 498
 499bool should_fail_request(struct block_device *part, unsigned int bytes)
 500{
 501	return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
 502	       should_fail(&fail_make_request, bytes);
 503}
 504
 505static int __init fail_make_request_debugfs(void)
 506{
 507	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
 508						NULL, &fail_make_request);
 509
 510	return PTR_ERR_OR_ZERO(dir);
 511}
 512
 513late_initcall(fail_make_request_debugfs);
 514#endif /* CONFIG_FAIL_MAKE_REQUEST */
 515
 516static inline void bio_check_ro(struct bio *bio)
 517{
 518	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
 519		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
 520			return;
 521
 522		if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
 523			return;
 524
 525		bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);
 526
 527		/*
 528		 * Use ioctl to set underlying disk of raid/dm to read-only
 529		 * will trigger this.
 530		 */
 531		pr_warn("Trying to write to read-only block-device %pg\n",
 532			bio->bi_bdev);
 533	}
 534}
 535
 536static noinline int should_fail_bio(struct bio *bio)
 537{
 538	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
 539		return -EIO;
 540	return 0;
 541}
 542ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
 543
 544/*
 545 * Check whether this bio extends beyond the end of the device or partition.
 546 * This may well happen - the kernel calls bread() without checking the size of
 547 * the device, e.g., when mounting a file system.
 548 */
 549static inline int bio_check_eod(struct bio *bio)
 550{
 551	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
 552	unsigned int nr_sectors = bio_sectors(bio);
 553
 554	if (nr_sectors &&
 555	    (nr_sectors > maxsector ||
 556	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
 557		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
 558				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
 559				    current->comm, bio->bi_bdev, bio->bi_opf,
 560				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
 561		return -EIO;
 562	}
 563	return 0;
 564}
 565
 566/*
 567 * Remap block n of partition p to block n+start(p) of the disk.
 568 */
 569static int blk_partition_remap(struct bio *bio)
 570{
 571	struct block_device *p = bio->bi_bdev;
 572
 573	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
 574		return -EIO;
 575	if (bio_sectors(bio)) {
 576		bio->bi_iter.bi_sector += p->bd_start_sect;
 577		trace_block_bio_remap(bio, p->bd_dev,
 578				      bio->bi_iter.bi_sector -
 579				      p->bd_start_sect);
 580	}
 581	bio_set_flag(bio, BIO_REMAPPED);
 582	return 0;
 583}
 584
 585/*
 586 * Check write append to a zoned block device.
 587 */
 588static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 589						 struct bio *bio)
 590{
 591	int nr_sectors = bio_sectors(bio);
 592
 593	/* Only applicable to zoned block devices */
 594	if (!bdev_is_zoned(bio->bi_bdev))
 595		return BLK_STS_NOTSUPP;
 596
 597	/* The bio sector must point to the start of a sequential zone */
 598	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
 599		return BLK_STS_IOERR;
 600
 601	/*
 602	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
 603	 * split and could result in non-contiguous sectors being written in
 604	 * different zones.
 605	 */
 606	if (nr_sectors > q->limits.chunk_sectors)
 607		return BLK_STS_IOERR;
 608
 609	/* Make sure the BIO is small enough and will not get split */
 610	if (nr_sectors > q->limits.max_zone_append_sectors)
 611		return BLK_STS_IOERR;
 612
 613	bio->bi_opf |= REQ_NOMERGE;
 614
 615	return BLK_STS_OK;
 616}
 617
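/*
 * Hypothetical sketch (not part of this file): issuing a zone append bio that
 * satisfies the checks above.  The bio must target the start of a sequential
 * zone and stay within max_zone_append_sectors; on completion the device
 * reports the actual write position back in bio->bi_iter.bi_sector.  The
 * helper name and parameters are illustrative only.
 */
static void example_zone_append(struct block_device *bdev,
				sector_t zone_start_sector, struct page *page,
				unsigned int len, bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_ZONE_APPEND, GFP_KERNEL);

	/* Point at the zone start; the device chooses the actual location. */
	bio->bi_iter.bi_sector = zone_start_sector;
	__bio_add_page(bio, page, len, 0);
	bio->bi_end_io = end_io;
	submit_bio(bio);
}
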
 618static void __submit_bio(struct bio *bio)
 619{
  620	/* If the caller has not plugged, add a plug here to cache the nsecs time. */
 621	struct blk_plug plug;
 622
 623	if (unlikely(!blk_crypto_bio_prep(&bio)))
 624		return;
 625
 626	blk_start_plug(&plug);
 627
 628	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
 629		blk_mq_submit_bio(bio);
 630	} else if (likely(bio_queue_enter(bio) == 0)) {
 631		struct gendisk *disk = bio->bi_bdev->bd_disk;
 632	
 633		if ((bio->bi_opf & REQ_POLLED) &&
 634		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
 635			bio->bi_status = BLK_STS_NOTSUPP;
 636			bio_endio(bio);
 637		} else {
 638			disk->fops->submit_bio(bio);
 639		}
 640		blk_queue_exit(disk->queue);
 641	}
 642
 643	blk_finish_plug(&plug);
 644}
 645
 646/*
 647 * The loop in this function may be a bit non-obvious, and so deserves some
 648 * explanation:
 649 *
 650 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 651 *    that), so we have a list with a single bio.
 652 *  - We pretend that we have just taken it off a longer list, so we assign
 653 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 654 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 655 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 656 *    non-NULL value in bio_list and re-enter the loop from the top.
  657 *  - In this case we really did just take the bio off the top of the list (no
 658 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 659 *    again.
 660 *
 661 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 662 * bio_list_on_stack[1] contains bios that were submitted before the current
 663 *	->submit_bio, but that haven't been processed yet.
 664 */
 665static void __submit_bio_noacct(struct bio *bio)
 666{
 667	struct bio_list bio_list_on_stack[2];
 668
 669	BUG_ON(bio->bi_next);
 670
 671	bio_list_init(&bio_list_on_stack[0]);
 672	current->bio_list = bio_list_on_stack;
 673
 674	do {
 675		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 676		struct bio_list lower, same;
 677
 678		/*
 679		 * Create a fresh bio_list for all subordinate requests.
 680		 */
 681		bio_list_on_stack[1] = bio_list_on_stack[0];
 682		bio_list_init(&bio_list_on_stack[0]);
 683
 684		__submit_bio(bio);
 685
 686		/*
 687		 * Sort new bios into those for a lower level and those for the
 688		 * same level.
 689		 */
 690		bio_list_init(&lower);
 691		bio_list_init(&same);
 692		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 693			if (q == bdev_get_queue(bio->bi_bdev))
 694				bio_list_add(&same, bio);
 695			else
 696				bio_list_add(&lower, bio);
 697
 698		/*
 699		 * Now assemble so we handle the lowest level first.
 700		 */
 701		bio_list_merge(&bio_list_on_stack[0], &lower);
 702		bio_list_merge(&bio_list_on_stack[0], &same);
 703		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 704	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
 705
 706	current->bio_list = NULL;
 707}
 708
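/*
 * Hypothetical sketch (not part of this file): a minimal bio based stacking
 * driver's ->submit_bio method.  The submit_bio_noacct() call below does not
 * recurse: because current->bio_list is set while __submit_bio_noacct() is
 * active, the remapped bio is merely queued and is processed after this
 * method returns.  "struct example_stack_dev" and its fields are assumptions
 * for illustration.
 */
struct example_stack_dev {
	struct block_device *backing_bdev;
	sector_t data_offset;
};

static void example_stack_submit_bio(struct bio *bio)
{
	struct example_stack_dev *dev = bio->bi_bdev->bd_disk->private_data;

	/* Redirect the bio to the backing device and resubmit it. */
	bio_set_dev(bio, dev->backing_bdev);
	bio->bi_iter.bi_sector += dev->data_offset;
	submit_bio_noacct(bio);
}
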
 709static void __submit_bio_noacct_mq(struct bio *bio)
 710{
 711	struct bio_list bio_list[2] = { };
 712
 713	current->bio_list = bio_list;
 714
 715	do {
 716		__submit_bio(bio);
 717	} while ((bio = bio_list_pop(&bio_list[0])));
 718
 719	current->bio_list = NULL;
 720}
 721
 722void submit_bio_noacct_nocheck(struct bio *bio)
 723{
 724	blk_cgroup_bio_start(bio);
 725	blkcg_bio_issue_init(bio);
 726
 727	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 728		trace_block_bio_queue(bio);
 729		/*
 730		 * Now that enqueuing has been traced, we need to trace
 731		 * completion as well.
 732		 */
 733		bio_set_flag(bio, BIO_TRACE_COMPLETION);
 734	}
 735
 736	/*
 737	 * We only want one ->submit_bio to be active at a time, else stack
 738	 * usage with stacked devices could be a problem.  Use current->bio_list
  739	 * to collect a list of requests submitted by a ->submit_bio method while
  740	 * it is active, and then process them after it has returned.
 741	 */
 742	if (current->bio_list)
 743		bio_list_add(&current->bio_list[0], bio);
 744	else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
 745		__submit_bio_noacct_mq(bio);
 746	else
 747		__submit_bio_noacct(bio);
 748}
 749
 750static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
 751						 struct bio *bio)
 752{
 753	if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
 754		return BLK_STS_INVAL;
 755
 756	if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
 757		return BLK_STS_INVAL;
 758
 759	return BLK_STS_OK;
 760}
 761
 762/**
 763 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 764 * @bio:  The bio describing the location in memory and on the device.
 765 *
 766 * This is a version of submit_bio() that shall only be used for I/O that is
 767 * resubmitted to lower level drivers by stacking block drivers.  All file
 768 * systems and other upper level users of the block layer should use
 769 * submit_bio() instead.
 770 */
 771void submit_bio_noacct(struct bio *bio)
 772{
 773	struct block_device *bdev = bio->bi_bdev;
 774	struct request_queue *q = bdev_get_queue(bdev);
 775	blk_status_t status = BLK_STS_IOERR;
 776
 777	might_sleep();
 778
 779	/*
 780	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
  781	 * if the queue does not support NOWAIT.
 782	 */
 783	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
 784		goto not_supported;
 785
 786	if (should_fail_bio(bio))
 787		goto end_io;
 788	bio_check_ro(bio);
 789	if (!bio_flagged(bio, BIO_REMAPPED)) {
 790		if (unlikely(bio_check_eod(bio)))
 791			goto end_io;
 792		if (bdev_is_partition(bdev) &&
 793		    unlikely(blk_partition_remap(bio)))
 794			goto end_io;
 795	}
 796
 797	/*
  798	 * Filter flush bios early so that bio based drivers without flush
 799	 * support don't have to worry about them.
 800	 */
 801	if (op_is_flush(bio->bi_opf)) {
 802		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
 803				 bio_op(bio) != REQ_OP_ZONE_APPEND))
 804			goto end_io;
 805		if (!bdev_write_cache(bdev)) {
 806			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 807			if (!bio_sectors(bio)) {
 808				status = BLK_STS_OK;
 809				goto end_io;
 810			}
 811		}
 812	}
 813
 814	switch (bio_op(bio)) {
 815	case REQ_OP_READ:
 816		break;
 817	case REQ_OP_WRITE:
 818		if (bio->bi_opf & REQ_ATOMIC) {
 819			status = blk_validate_atomic_write_op_size(q, bio);
 820			if (status != BLK_STS_OK)
 821				goto end_io;
 822		}
 823		break;
 824	case REQ_OP_FLUSH:
 825		/*
 826		 * REQ_OP_FLUSH can't be submitted through bios, it is only
  827	 * synthesized in struct request by the flush state machine.
 828		 */
 829		goto not_supported;
 830	case REQ_OP_DISCARD:
 831		if (!bdev_max_discard_sectors(bdev))
 832			goto not_supported;
 833		break;
 834	case REQ_OP_SECURE_ERASE:
 835		if (!bdev_max_secure_erase_sectors(bdev))
 836			goto not_supported;
 837		break;
 838	case REQ_OP_ZONE_APPEND:
 839		status = blk_check_zone_append(q, bio);
 840		if (status != BLK_STS_OK)
 841			goto end_io;
 842		break;
 843	case REQ_OP_WRITE_ZEROES:
 844		if (!q->limits.max_write_zeroes_sectors)
 845			goto not_supported;
 846		break;
 847	case REQ_OP_ZONE_RESET:
 848	case REQ_OP_ZONE_OPEN:
 849	case REQ_OP_ZONE_CLOSE:
 850	case REQ_OP_ZONE_FINISH:
 851	case REQ_OP_ZONE_RESET_ALL:
 852		if (!bdev_is_zoned(bio->bi_bdev))
 853			goto not_supported;
 854		break;
 855	case REQ_OP_DRV_IN:
 856	case REQ_OP_DRV_OUT:
 857		/*
 858		 * Driver private operations are only used with passthrough
 859		 * requests.
 860		 */
 861		fallthrough;
 862	default:
 863		goto not_supported;
 864	}
 865
 866	if (blk_throtl_bio(bio))
 867		return;
 868	submit_bio_noacct_nocheck(bio);
 869	return;
 870
 871not_supported:
 872	status = BLK_STS_NOTSUPP;
 873end_io:
 874	bio->bi_status = status;
 875	bio_endio(bio);
 876}
 877EXPORT_SYMBOL(submit_bio_noacct);
 878
 879static void bio_set_ioprio(struct bio *bio)
 880{
 881	/* Nobody set ioprio so far? Initialize it based on task's nice value */
 882	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
 883		bio->bi_ioprio = get_current_ioprio();
 884	blkcg_set_ioprio(bio);
 885}
 886
 887/**
 888 * submit_bio - submit a bio to the block device layer for I/O
 889 * @bio: The &struct bio which describes the I/O
 890 *
 891 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 892 * fully set up &struct bio that describes the I/O that needs to be done.  The
  893 * bio will be sent to the device described by the bi_bdev field.
 894 *
 895 * The success/failure status of the request, along with notification of
 896 * completion, is delivered asynchronously through the ->bi_end_io() callback
 897 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 898 * been called.
 899 */
 900void submit_bio(struct bio *bio)
 901{
 902	if (bio_op(bio) == REQ_OP_READ) {
 903		task_io_account_read(bio->bi_iter.bi_size);
 904		count_vm_events(PGPGIN, bio_sectors(bio));
 905	} else if (bio_op(bio) == REQ_OP_WRITE) {
 906		count_vm_events(PGPGOUT, bio_sectors(bio));
 907	}
 908
 909	bio_set_ioprio(bio);
 910	submit_bio_noacct(bio);
 911}
 912EXPORT_SYMBOL(submit_bio);
 913
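/*
 * Hypothetical usage sketch (not part of this file): a caller builds a fully
 * set up bio, hands it to submit_bio() and waits for ->bi_end_io() to fire.
 * The "example_*" names are assumptions; in-tree callers that want
 * synchronous behaviour typically use submit_bio_wait() instead.
 */
static void example_read_end_io(struct bio *bio)
{
	/* bio->bi_status holds the result; the page now holds the data. */
	complete(bio->bi_private);
	bio_put(bio);
}

static void example_read_page(struct block_device *bdev, sector_t sector,
			      struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = example_read_end_io;
	bio->bi_private = &done;

	submit_bio(bio);
	wait_for_completion(&done);
}
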
 914/**
 915 * bio_poll - poll for BIO completions
 916 * @bio: bio to poll for
 917 * @iob: batches of IO
 918 * @flags: BLK_POLL_* flags that control the behavior
 919 *
 920 * Poll for completions on queue associated with the bio. Returns number of
 921 * completed entries found.
 922 *
 923 * Note: the caller must either be the context that submitted @bio, or
 924 * be in a RCU critical section to prevent freeing of @bio.
 925 */
 926int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 927{
 928	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
 929	struct block_device *bdev;
 930	struct request_queue *q;
 931	int ret = 0;
 932
 933	bdev = READ_ONCE(bio->bi_bdev);
 934	if (!bdev)
 935		return 0;
 936
 937	q = bdev_get_queue(bdev);
 938	if (cookie == BLK_QC_T_NONE)
 939		return 0;
 940
 941	blk_flush_plug(current->plug, false);
 942
 943	/*
 944	 * We need to be able to enter a frozen queue, similar to how
 945	 * timeouts also need to do that. If that is blocked, then we can
 946	 * have pending IO when a queue freeze is started, and then the
 947	 * wait for the freeze to finish will wait for polled requests to
  948	 * timeout as the poller is prevented from entering the queue and
 949	 * completing them. As long as we prevent new IO from being queued,
 950	 * that should be all that matters.
 951	 */
 952	if (!percpu_ref_tryget(&q->q_usage_counter))
 953		return 0;
 954	if (queue_is_mq(q)) {
 955		ret = blk_mq_poll(q, cookie, iob, flags);
 956	} else {
 957		struct gendisk *disk = q->disk;
 958
 959		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
 960		    disk->fops->poll_bio)
 961			ret = disk->fops->poll_bio(bio, iob, flags);
 962	}
 963	blk_queue_exit(q);
 964	return ret;
 965}
 966EXPORT_SYMBOL_GPL(bio_poll);
 967
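/*
 * Hypothetical sketch (not part of this file): busy-polling for completion of
 * a bio that was submitted with REQ_POLLED set.  "done" is assumed to be a
 * flag raised by the bio's ->bi_end_io() handler.
 */
static void example_poll_until_done(struct bio *bio, bool *done)
{
	while (!READ_ONCE(*done)) {
		if (!bio_poll(bio, NULL, 0))
			cpu_relax();
	}
}
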
 968/*
 969 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 970 * in iocb->private, and cleared before freeing the bio.
 971 */
 972int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 973		    unsigned int flags)
 974{
 975	struct bio *bio;
 976	int ret = 0;
 977
 978	/*
 979	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
 980	 * point to a freshly allocated bio at this point.  If that happens
 981	 * we have a few cases to consider:
 982	 *
  983	 *  1) the bio is being initialized and bi_bdev is NULL.  We can
  984	 *     simply do nothing in this case
 985	 *  2) the bio points to a not poll enabled device.  bio_poll will catch
 986	 *     this and return 0
 987	 *  3) the bio points to a poll capable device, including but not
 988	 *     limited to the one that the original bio pointed to.  In this
 989	 *     case we will call into the actual poll method and poll for I/O,
 990	 *     even if we don't need to, but it won't cause harm either.
 991	 *
 992	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
 993	 * is still allocated. Because partitions hold a reference to the whole
 994	 * device bdev and thus disk, the disk is also still valid.  Grabbing
 995	 * a reference to the queue in bio_poll() ensures the hctxs and requests
 996	 * are still valid as well.
 997	 */
 998	rcu_read_lock();
 999	bio = READ_ONCE(kiocb->private);
1000	if (bio)
1001		ret = bio_poll(bio, iob, flags);
1002	rcu_read_unlock();
1003
1004	return ret;
1005}
1006EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
1007
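/*
 * Hypothetical sketch (not part of this file): wiring up iocb_bio_iopoll().
 * The submission path publishes the in-flight bio through iocb->private (and
 * clears it before the bio is freed), and ->iopoll forwards to the helper
 * above.  "example_fops" is an assumption for illustration.
 */
static const struct file_operations example_fops = {
	.iopoll		= iocb_bio_iopoll,
	/* read_iter/write_iter etc. omitted; they are assumed to store the
	 * in-flight bio with WRITE_ONCE(iocb->private, bio) and clear it
	 * before the bio is freed. */
};
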
1008void update_io_ticks(struct block_device *part, unsigned long now, bool end)
1009{
1010	unsigned long stamp;
1011again:
1012	stamp = READ_ONCE(part->bd_stamp);
1013	if (unlikely(time_after(now, stamp)) &&
1014	    likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
1015	    (end || part_in_flight(part)))
1016		__part_stat_add(part, io_ticks, now - stamp);
1017
1018	if (bdev_is_partition(part)) {
1019		part = bdev_whole(part);
1020		goto again;
1021	}
1022}
1023
1024unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
1025				 unsigned long start_time)
1026{
1027	part_stat_lock();
1028	update_io_ticks(bdev, start_time, false);
1029	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
1030	part_stat_unlock();
1031
1032	return start_time;
1033}
1034EXPORT_SYMBOL(bdev_start_io_acct);
1035
1036/**
1037 * bio_start_io_acct - start I/O accounting for bio based drivers
 1038 * @bio:	bio to start accounting for
1039 *
1040 * Returns the start time that should be passed back to bio_end_io_acct().
1041 */
1042unsigned long bio_start_io_acct(struct bio *bio)
1043{
1044	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
1045}
1046EXPORT_SYMBOL_GPL(bio_start_io_acct);
1047
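/*
 * Hypothetical sketch (not part of this file): a bio based driver pairing
 * bio_start_io_acct() with bio_end_io_acct().  "struct example_io" and the
 * way it is attached through bi_private are assumptions; drivers that remap
 * bi_bdev before completion use bio_end_io_acct_remapped() (below) instead.
 */
struct example_io {
	unsigned long start_time;
	/* driver specific completion state ... */
};

static void example_acct_end_io(struct bio *bio)
{
	struct example_io *io = bio->bi_private;

	bio_end_io_acct(bio, io->start_time);
	/* complete the original request, free io, ... */
}

static void example_acct_submit(struct example_io *io, struct bio *bio)
{
	io->start_time = bio_start_io_acct(bio);
	bio->bi_private = io;
	bio->bi_end_io = example_acct_end_io;
	submit_bio_noacct(bio);
}
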
1048void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
1049		      unsigned int sectors, unsigned long start_time)
1050{
1051	const int sgrp = op_stat_group(op);
1052	unsigned long now = READ_ONCE(jiffies);
1053	unsigned long duration = now - start_time;
1054
1055	part_stat_lock();
1056	update_io_ticks(bdev, now, true);
1057	part_stat_inc(bdev, ios[sgrp]);
1058	part_stat_add(bdev, sectors[sgrp], sectors);
1059	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
1060	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
1061	part_stat_unlock();
1062}
1063EXPORT_SYMBOL(bdev_end_io_acct);
1064
1065void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1066			      struct block_device *orig_bdev)
1067{
1068	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
1069}
1070EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
1071
1072/**
1073 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 1074 * @q: the queue of the device being checked
1075 *
1076 * Description:
1077 *    Check if underlying low-level drivers of a device are busy.
1078 *    If the drivers want to export their busy state, they must set own
1079 *    exporting function using blk_queue_lld_busy() first.
1080 *
1081 *    Basically, this function is used only by request stacking drivers
1082 *    to stop dispatching requests to underlying devices when underlying
1083 *    devices are busy.  This behavior helps more I/O merging on the queue
1084 *    of the request stacking driver and prevents I/O throughput regression
1085 *    on burst I/O load.
1086 *
1087 * Return:
1088 *    0 - Not busy (The request stacking driver should dispatch request)
1089 *    1 - Busy (The request stacking driver should stop dispatching request)
1090 */
1091int blk_lld_busy(struct request_queue *q)
1092{
1093	if (queue_is_mq(q) && q->mq_ops->busy)
1094		return q->mq_ops->busy(q);
1095
1096	return 0;
1097}
1098EXPORT_SYMBOL_GPL(blk_lld_busy);
1099
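/*
 * Hypothetical sketch (not part of this file): a request stacking driver
 * (dm-multipath style) consulting blk_lld_busy() before dispatching to an
 * underlying path, so requests stay queued and can be merged while the lower
 * device is busy.
 */
static bool example_path_is_busy(struct block_device *lower_bdev)
{
	return blk_lld_busy(bdev_get_queue(lower_bdev));
}
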
1100int kblockd_schedule_work(struct work_struct *work)
1101{
1102	return queue_work(kblockd_workqueue, work);
1103}
1104EXPORT_SYMBOL(kblockd_schedule_work);
1105
1106int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1107				unsigned long delay)
1108{
1109	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1110}
1111EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1112
1113void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1114{
1115	struct task_struct *tsk = current;
1116
1117	/*
1118	 * If this is a nested plug, don't actually assign it.
1119	 */
1120	if (tsk->plug)
1121		return;
1122
1123	plug->cur_ktime = 0;
1124	rq_list_init(&plug->mq_list);
1125	rq_list_init(&plug->cached_rqs);
1126	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1127	plug->rq_count = 0;
1128	plug->multiple_queues = false;
1129	plug->has_elevator = false;
1130	INIT_LIST_HEAD(&plug->cb_list);
1131
1132	/*
1133	 * Store ordering should not be needed here, since a potential
1134	 * preempt will imply a full memory barrier
1135	 */
1136	tsk->plug = plug;
1137}
1138
1139/**
1140 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1141 * @plug:	The &struct blk_plug that needs to be initialized
1142 *
1143 * Description:
1144 *   blk_start_plug() indicates to the block layer an intent by the caller
1145 *   to submit multiple I/O requests in a batch.  The block layer may use
1146 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
1147 *   is called.  However, the block layer may choose to submit requests
1148 *   before a call to blk_finish_plug() if the number of queued I/Os
1149 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1150 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
1151 *   the task schedules (see below).
1152 *
1153 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
1154 *   pending I/O should the task end up blocking between blk_start_plug() and
1155 *   blk_finish_plug(). This is important from a performance perspective, but
1156 *   also ensures that we don't deadlock. For instance, if the task is blocking
1157 *   for a memory allocation, memory reclaim could end up wanting to free a
1158 *   page belonging to that request that is currently residing in our private
1159 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
1160 *   this kind of deadlock.
1161 */
1162void blk_start_plug(struct blk_plug *plug)
1163{
1164	blk_start_plug_nr_ios(plug, 1);
1165}
1166EXPORT_SYMBOL(blk_start_plug);
1167
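/*
 * Hypothetical usage sketch (not part of this file): batching a set of
 * already prepared bios under one plug so the block layer can merge and
 * dispatch them together.  "bios" and "nr" are assumptions of the caller.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* flushes anything still plugged */
}
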
1168static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1169{
1170	LIST_HEAD(callbacks);
1171
1172	while (!list_empty(&plug->cb_list)) {
1173		list_splice_init(&plug->cb_list, &callbacks);
1174
1175		while (!list_empty(&callbacks)) {
1176			struct blk_plug_cb *cb = list_first_entry(&callbacks,
1177							  struct blk_plug_cb,
1178							  list);
1179			list_del(&cb->list);
1180			cb->callback(cb, from_schedule);
1181		}
1182	}
1183}
1184
1185struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1186				      int size)
1187{
1188	struct blk_plug *plug = current->plug;
1189	struct blk_plug_cb *cb;
1190
1191	if (!plug)
1192		return NULL;
1193
1194	list_for_each_entry(cb, &plug->cb_list, list)
1195		if (cb->callback == unplug && cb->data == data)
1196			return cb;
1197
1198	/* Not currently on the callback list */
1199	BUG_ON(size < sizeof(*cb));
1200	cb = kzalloc(size, GFP_ATOMIC);
1201	if (cb) {
1202		cb->data = data;
1203		cb->callback = unplug;
1204		list_add(&cb->list, &plug->cb_list);
1205	}
1206	return cb;
1207}
1208EXPORT_SYMBOL(blk_check_plugged);
1209
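/*
 * Hypothetical sketch (not part of this file): the md/raid style use of
 * blk_check_plugged().  The driver embeds struct blk_plug_cb at the start of
 * its own context, queues bios on it while a plug is active, and drains them
 * from the callback when the plug is flushed.  All "example_*" names are
 * assumptions for illustration.
 */
struct example_plug_cb {
	struct blk_plug_cb plug_cb;	/* must be first: blk_check_plugged()
					 * returns the start of the allocation */
	struct bio_list pending;	/* kzalloc() leaves this empty */
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb =
		container_of(cb, struct example_plug_cb, plug_cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&ecb->pending)) != NULL)
		submit_bio_noacct(bio);
	kfree(ecb);
}

static bool example_defer_bio(struct bio *bio)
{
	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, NULL,
						   sizeof(struct example_plug_cb));
	struct example_plug_cb *ecb;

	if (!cb)
		return false;	/* no plug active, caller submits directly */
	ecb = container_of(cb, struct example_plug_cb, plug_cb);
	bio_list_add(&ecb->pending, bio);
	return true;
}
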
1210void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1211{
1212	if (!list_empty(&plug->cb_list))
1213		flush_plug_callbacks(plug, from_schedule);
1214	blk_mq_flush_plug_list(plug, from_schedule);
1215	/*
1216	 * Unconditionally flush out cached requests, even if the unplug
 1217	 * event came from schedule. Since we now hold references to the
1218	 * queue for cached requests, we don't want a blocked task holding
1219	 * up a queue freeze/quiesce event.
1220	 */
1221	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
1222		blk_mq_free_plug_rqs(plug);
1223
1224	plug->cur_ktime = 0;
1225	current->flags &= ~PF_BLOCK_TS;
1226}
1227
1228/**
1229 * blk_finish_plug - mark the end of a batch of submitted I/O
1230 * @plug:	The &struct blk_plug passed to blk_start_plug()
1231 *
1232 * Description:
1233 * Indicate that a batch of I/O submissions is complete.  This function
1234 * must be paired with an initial call to blk_start_plug().  The intent
1235 * is to allow the block layer to optimize I/O submission.  See the
1236 * documentation for blk_start_plug() for more information.
1237 */
1238void blk_finish_plug(struct blk_plug *plug)
1239{
1240	if (plug == current->plug) {
1241		__blk_flush_plug(plug, false);
1242		current->plug = NULL;
1243	}
1244}
1245EXPORT_SYMBOL(blk_finish_plug);
1246
1247void blk_io_schedule(void)
1248{
1249	/* Prevent hang_check timer from firing at us during very long I/O */
1250	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1251
1252	if (timeout)
1253		io_schedule_timeout(timeout);
1254	else
1255		io_schedule();
1256}
1257EXPORT_SYMBOL_GPL(blk_io_schedule);
1258
1259int __init blk_dev_init(void)
1260{
1261	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
1262	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1263			sizeof_field(struct request, cmd_flags));
1264	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1265			sizeof_field(struct bio, bi_opf));
1266
1267	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
1268	kblockd_workqueue = alloc_workqueue("kblockd",
1269					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1270	if (!kblockd_workqueue)
1271		panic("Failed to create kblockd\n");
1272
1273	blk_requestq_cachep = KMEM_CACHE(request_queue, SLAB_PANIC);
1274
1275	blk_debugfs_root = debugfs_create_dir("block", NULL);
1276
1277	return 0;
1278}