   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Copyright (C) 1991, 1992  Linus Torvalds
   4 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
   5 *  Copyright (C) 2016 - 2020 Christoph Hellwig
   6 */
   7
   8#include <linux/init.h>
   9#include <linux/mm.h>
  10#include <linux/slab.h>
  11#include <linux/kmod.h>
  12#include <linux/major.h>
  13#include <linux/device_cgroup.h>
  14#include <linux/blkdev.h>
  15#include <linux/blk-integrity.h>
  16#include <linux/backing-dev.h>
  17#include <linux/module.h>
  18#include <linux/blkpg.h>
  19#include <linux/magic.h>
  20#include <linux/buffer_head.h>
  21#include <linux/swap.h>
  22#include <linux/writeback.h>
  23#include <linux/mount.h>
  24#include <linux/pseudo_fs.h>
  25#include <linux/uio.h>
  26#include <linux/namei.h>
  27#include <linux/part_stat.h>
  28#include <linux/uaccess.h>
  29#include <linux/stat.h>
  30#include "../fs/internal.h"
  31#include "blk.h"
  32
  33/* Should we allow writing to mounted block devices? */
  34static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED);
  35
  36struct bdev_inode {
  37	struct block_device bdev;
  38	struct inode vfs_inode;
  39};
  40
  41static inline struct bdev_inode *BDEV_I(struct inode *inode)
  42{
  43	return container_of(inode, struct bdev_inode, vfs_inode);
  44}
  45
  46struct block_device *I_BDEV(struct inode *inode)
  47{
  48	return &BDEV_I(inode)->bdev;
  49}
  50EXPORT_SYMBOL(I_BDEV);
  51
  52static void bdev_write_inode(struct block_device *bdev)
  53{
  54	struct inode *inode = bdev->bd_inode;
  55	int ret;
  56
  57	spin_lock(&inode->i_lock);
  58	while (inode->i_state & I_DIRTY) {
  59		spin_unlock(&inode->i_lock);
  60		ret = write_inode_now(inode, true);
  61		if (ret)
  62			pr_warn_ratelimited(
  63	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
  64				bdev, ret);
  65		spin_lock(&inode->i_lock);
  66	}
  67	spin_unlock(&inode->i_lock);
  68}
  69
  70/* Kill _all_ buffers and pagecache, dirty or not. */
  71static void kill_bdev(struct block_device *bdev)
  72{
  73	struct address_space *mapping = bdev->bd_inode->i_mapping;
  74
  75	if (mapping_empty(mapping))
  76		return;
  77
  78	invalidate_bh_lrus();
  79	truncate_inode_pages(mapping, 0);
  80}
  81
  82/* Invalidate clean unused buffers and pagecache. */
  83void invalidate_bdev(struct block_device *bdev)
  84{
  85	struct address_space *mapping = bdev->bd_inode->i_mapping;
  86
  87	if (mapping->nrpages) {
  88		invalidate_bh_lrus();
  89		lru_add_drain_all();	/* make sure all lru add caches are flushed */
  90		invalidate_mapping_pages(mapping, 0, -1);
  91	}
  92}
  93EXPORT_SYMBOL(invalidate_bdev);
  94
  95/*
  96 * Drop all buffers & page cache for the given bdev range. This function bails
  97 * with an error if the bdev has another exclusive owner (such as a filesystem).
  98 */
  99int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
 100			loff_t lstart, loff_t lend)
 101{
 102	/*
 103	 * If we don't hold an exclusive handle for the device, upgrade to it
 104	 * while we discard the buffer cache to avoid discarding buffers
 105	 * under a live filesystem.
 106	 */
 107	if (!(mode & BLK_OPEN_EXCL)) {
 108		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
 109		if (err)
 110			goto invalidate;
 111	}
 112
 113	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
 114	if (!(mode & BLK_OPEN_EXCL))
 115		bd_abort_claiming(bdev, truncate_bdev_range);
 116	return 0;
 117
 118invalidate:
 119	/*
 120	 * Someone else holds the device exclusively open. Try invalidating instead.
 121	 * The 'end' argument is inclusive so the rounding is safe.
 122	 */
 123	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
 124					     lstart >> PAGE_SHIFT,
 125					     lend >> PAGE_SHIFT);
 126}
 127
 128static void set_init_blocksize(struct block_device *bdev)
 129{
 130	unsigned int bsize = bdev_logical_block_size(bdev);
 131	loff_t size = i_size_read(bdev->bd_inode);
 132
 133	while (bsize < PAGE_SIZE) {
 134		if (size & bsize)
 135			break;
 136		bsize <<= 1;
 137	}
 138	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 139}
 140
 141int set_blocksize(struct block_device *bdev, int size)
 142{
 143	/* Size must be a power of two, and between 512 and PAGE_SIZE */
 144	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
 145		return -EINVAL;
 146
 147	/* Size cannot be smaller than the size supported by the device */
 148	if (size < bdev_logical_block_size(bdev))
 149		return -EINVAL;
 150
 151	/* Don't change the size if it is the same as the current one */
 152	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
 153		sync_blockdev(bdev);
 154		bdev->bd_inode->i_blkbits = blksize_bits(size);
 155		kill_bdev(bdev);
 156	}
 157	return 0;
 158}
 159
 160EXPORT_SYMBOL(set_blocksize);
 161
 162int sb_set_blocksize(struct super_block *sb, int size)
 163{
 164	if (set_blocksize(sb->s_bdev, size))
 165		return 0;
 166	/* If we get here, we know the size is a power of two
 167	 * and its value is between 512 and PAGE_SIZE */
 168	sb->s_blocksize = size;
 169	sb->s_blocksize_bits = blksize_bits(size);
 170	return sb->s_blocksize;
 171}
 172
 173EXPORT_SYMBOL(sb_set_blocksize);
 174
 175int sb_min_blocksize(struct super_block *sb, int size)
 176{
 177	int minsize = bdev_logical_block_size(sb->s_bdev);
 178	if (size < minsize)
 179		size = minsize;
 180	return sb_set_blocksize(sb, size);
 181}
 182
 183EXPORT_SYMBOL(sb_min_blocksize);
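
/*
 * Editorial example, not part of bdev.c: a minimal sketch of how a
 * filesystem's fill_super callback might pick its block size with the
 * helpers above.  "examplefs_fill_super" and the 1024-byte preference are
 * hypothetical; real filesystems derive the size from their on-disk format.
 */
#if 0	/* illustration only */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* Ask for 1024 bytes, but never less than the logical block size. */
	if (!sb_min_blocksize(sb, 1024))
		return -EINVAL;	/* set_blocksize() rejected the size */

	/* sb->s_blocksize and sb->s_blocksize_bits are now valid. */
	return 0;
}
#endif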
 184
 185int sync_blockdev_nowait(struct block_device *bdev)
 186{
 187	if (!bdev)
 188		return 0;
 189	return filemap_flush(bdev->bd_inode->i_mapping);
 190}
 191EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
 192
 193/*
 194 * Write out and wait upon all the dirty data associated with a block
 195 * device via its mapping.  Does not take the superblock lock.
 196 */
 197int sync_blockdev(struct block_device *bdev)
 198{
 199	if (!bdev)
 200		return 0;
 201	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
 202}
 203EXPORT_SYMBOL(sync_blockdev);
 204
 205int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
 206{
 207	return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
 208			lstart, lend);
 209}
 210EXPORT_SYMBOL(sync_blockdev_range);
 211
 212/**
 213 * bdev_freeze - lock a filesystem and force it into a consistent state
 214 * @bdev:	blockdevice to lock
 215 *
 216 * If a superblock is found on this device, we take the s_umount semaphore
 217 * on it to make sure nobody unmounts until the snapshot creation is done.
 218 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 219 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 220 * freeze requests arrive simultaneously. It counts up in bdev_freeze() and
 221 * counts down in bdev_thaw(). When it reaches 0, bdev_thaw() actually
 222 * unfreezes the filesystem.
 223 *
 224 * Return: On success zero is returned, negative error code on failure.
 225 */
 226int bdev_freeze(struct block_device *bdev)
 227{
 228	int error = 0;
 229
 230	mutex_lock(&bdev->bd_fsfreeze_mutex);
 231
 232	if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) {
 233		mutex_unlock(&bdev->bd_fsfreeze_mutex);
 234		return 0;
 235	}
 236
 237	mutex_lock(&bdev->bd_holder_lock);
 238	if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) {
 239		error = bdev->bd_holder_ops->freeze(bdev);
 240		lockdep_assert_not_held(&bdev->bd_holder_lock);
 241	} else {
 242		mutex_unlock(&bdev->bd_holder_lock);
 243		error = sync_blockdev(bdev);
 244	}
 245
 246	if (error)
 247		atomic_dec(&bdev->bd_fsfreeze_count);
 248
 249	mutex_unlock(&bdev->bd_fsfreeze_mutex);
 250	return error;
 251}
 252EXPORT_SYMBOL(bdev_freeze);
 253
 254/**
 255 * bdev_thaw - unlock filesystem
 256 * @bdev:	blockdevice to unlock
 257 *
 258 * Unlocks the filesystem and marks it writeable again after bdev_freeze().
 259 *
 260 * Return: On success zero is returned, negative error code on failure.
 261 */
 262int bdev_thaw(struct block_device *bdev)
 263{
 264	int error = -EINVAL, nr_freeze;
 265
 266	mutex_lock(&bdev->bd_fsfreeze_mutex);
 267
 268	/*
 269	 * If this returns < 0 it means that @bd_fsfreeze_count was
 270	 * already 0 and no decrement was performed.
 271	 */
 272	nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count);
 273	if (nr_freeze < 0)
 274		goto out;
 275
 276	error = 0;
 277	if (nr_freeze > 0)
 278		goto out;
 279
 280	mutex_lock(&bdev->bd_holder_lock);
 281	if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) {
 282		error = bdev->bd_holder_ops->thaw(bdev);
 283		lockdep_assert_not_held(&bdev->bd_holder_lock);
 284	} else {
 285		mutex_unlock(&bdev->bd_holder_lock);
 286	}
 287
 288	if (error)
 289		atomic_inc(&bdev->bd_fsfreeze_count);
 290out:
 291	mutex_unlock(&bdev->bd_fsfreeze_mutex);
 292	return error;
 293}
 294EXPORT_SYMBOL(bdev_thaw);
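
/*
 * Editorial example, not part of bdev.c: the intended bdev_freeze()/
 * bdev_thaw() pairing, e.g. around taking a snapshot of the device.
 * "example_snapshot_bdev" and the snapshot step are hypothetical.
 */
#if 0	/* illustration only */
static int example_snapshot_bdev(struct block_device *bdev)
{
	int error;

	error = bdev_freeze(bdev);	/* filesystem frozen, or data synced */
	if (error)
		return error;

	/* ... capture the snapshot while no new writes reach @bdev ... */

	return bdev_thaw(bdev);		/* every bdev_freeze() needs a matching thaw */
}
#endif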
 295
 296/*
 297 * pseudo-fs
 298 */
 299
 300static  __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
 301static struct kmem_cache *bdev_cachep __ro_after_init;
 302
 303static struct inode *bdev_alloc_inode(struct super_block *sb)
 304{
 305	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);
 306
 307	if (!ei)
 308		return NULL;
 309	memset(&ei->bdev, 0, sizeof(ei->bdev));
 310	return &ei->vfs_inode;
 311}
 312
 313static void bdev_free_inode(struct inode *inode)
 314{
 315	struct block_device *bdev = I_BDEV(inode);
 316
 317	free_percpu(bdev->bd_stats);
 318	kfree(bdev->bd_meta_info);
 319
 320	if (!bdev_is_partition(bdev)) {
 321		if (bdev->bd_disk && bdev->bd_disk->bdi)
 322			bdi_put(bdev->bd_disk->bdi);
 323		kfree(bdev->bd_disk);
 324	}
 325
 326	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
 327		blk_free_ext_minor(MINOR(bdev->bd_dev));
 328
 329	kmem_cache_free(bdev_cachep, BDEV_I(inode));
 330}
 331
 332static void init_once(void *data)
 333{
 334	struct bdev_inode *ei = data;
 335
 336	inode_init_once(&ei->vfs_inode);
 337}
 338
 339static void bdev_evict_inode(struct inode *inode)
 340{
 341	truncate_inode_pages_final(&inode->i_data);
 342	invalidate_inode_buffers(inode); /* is it needed here? */
 343	clear_inode(inode);
 344}
 345
 346static const struct super_operations bdev_sops = {
 347	.statfs = simple_statfs,
 348	.alloc_inode = bdev_alloc_inode,
 349	.free_inode = bdev_free_inode,
 350	.drop_inode = generic_delete_inode,
 351	.evict_inode = bdev_evict_inode,
 352};
 353
 354static int bd_init_fs_context(struct fs_context *fc)
 355{
 356	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
 357	if (!ctx)
 358		return -ENOMEM;
 359	fc->s_iflags |= SB_I_CGROUPWB;
 360	ctx->ops = &bdev_sops;
 361	return 0;
 362}
 363
 364static struct file_system_type bd_type = {
 365	.name		= "bdev",
 366	.init_fs_context = bd_init_fs_context,
 367	.kill_sb	= kill_anon_super,
 368};
 369
 370struct super_block *blockdev_superblock __ro_after_init;
 371EXPORT_SYMBOL_GPL(blockdev_superblock);
 372
 373void __init bdev_cache_init(void)
 374{
 375	int err;
 376	static struct vfsmount *bd_mnt __ro_after_init;
 377
 378	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
 379			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
 380				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
 381			init_once);
 382	err = register_filesystem(&bd_type);
 383	if (err)
 384		panic("Cannot register bdev pseudo-fs");
 385	bd_mnt = kern_mount(&bd_type);
 386	if (IS_ERR(bd_mnt))
 387		panic("Cannot create bdev pseudo-fs");
 388	blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
 389}
 390
 391struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
 392{
 393	struct block_device *bdev;
 394	struct inode *inode;
 395
 396	inode = new_inode(blockdev_superblock);
 397	if (!inode)
 398		return NULL;
 399	inode->i_mode = S_IFBLK;
 400	inode->i_rdev = 0;
 401	inode->i_data.a_ops = &def_blk_aops;
 402	mapping_set_gfp_mask(&inode->i_data, GFP_USER);
 403
 404	bdev = I_BDEV(inode);
 405	mutex_init(&bdev->bd_fsfreeze_mutex);
 406	spin_lock_init(&bdev->bd_size_lock);
 407	mutex_init(&bdev->bd_holder_lock);
 408	bdev->bd_partno = partno;
 409	bdev->bd_inode = inode;
 410	bdev->bd_queue = disk->queue;
 411	if (partno)
 412		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
 413	else
 414		bdev->bd_has_submit_bio = false;
 415	bdev->bd_stats = alloc_percpu(struct disk_stats);
 416	if (!bdev->bd_stats) {
 417		iput(inode);
 418		return NULL;
 419	}
 420	bdev->bd_disk = disk;
 421	return bdev;
 422}
 423
 424void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 425{
 426	spin_lock(&bdev->bd_size_lock);
 427	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
 428	bdev->bd_nr_sectors = sectors;
 429	spin_unlock(&bdev->bd_size_lock);
 430}
 431
 432void bdev_add(struct block_device *bdev, dev_t dev)
 433{
 434	if (bdev_stable_writes(bdev))
 435		mapping_set_stable_writes(bdev->bd_inode->i_mapping);
 436	bdev->bd_dev = dev;
 437	bdev->bd_inode->i_rdev = dev;
 438	bdev->bd_inode->i_ino = dev;
 439	insert_inode_hash(bdev->bd_inode);
 440}
 441
 442long nr_blockdev_pages(void)
 443{
 444	struct inode *inode;
 445	long ret = 0;
 446
 447	spin_lock(&blockdev_superblock->s_inode_list_lock);
 448	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
 449		ret += inode->i_mapping->nrpages;
 450	spin_unlock(&blockdev_superblock->s_inode_list_lock);
 451
 452	return ret;
 453}
 454
 455/**
 456 * bd_may_claim - test whether a block device can be claimed
 457 * @bdev: block device of interest
 458 * @holder: holder trying to claim @bdev
 459 * @hops: holder ops
 460 *
 461 * Test whether @bdev can be claimed by @holder.
 462 *
 463 * RETURNS:
 464 * %true if @bdev can be claimed, %false otherwise.
 465 */
 466static bool bd_may_claim(struct block_device *bdev, void *holder,
 467		const struct blk_holder_ops *hops)
 468{
 469	struct block_device *whole = bdev_whole(bdev);
 470
 471	lockdep_assert_held(&bdev_lock);
 472
 473	if (bdev->bd_holder) {
 474		/*
 475		 * The same holder can always re-claim.
 476		 */
 477		if (bdev->bd_holder == holder) {
 478			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
 479				return false;
 480			return true;
 481		}
 482		return false;
 483	}
 484
 485	/*
 486	 * If the whole device's holder is set to bd_may_claim, a partition on
 487	 * the device is claimed, but not the whole device.
 488	 */
 489	if (whole != bdev &&
 490	    whole->bd_holder && whole->bd_holder != bd_may_claim)
 491		return false;
 492	return true;
 493}
 494
 495/**
 496 * bd_prepare_to_claim - claim a block device
 497 * @bdev: block device of interest
 498 * @holder: holder trying to claim @bdev
 499 * @hops: holder ops.
 500 *
 501 * Claim @bdev.  This function fails if @bdev is already claimed by another
 502 * holder and waits if another claiming is in progress. On successful return,
 503 * the caller has ownership of bd_claiming and bd_holder[s].
 504 *
 505 * RETURNS:
 506 * 0 if @bdev can be claimed, -EBUSY otherwise.
 507 */
 508int bd_prepare_to_claim(struct block_device *bdev, void *holder,
 509		const struct blk_holder_ops *hops)
 510{
 511	struct block_device *whole = bdev_whole(bdev);
 512
 513	if (WARN_ON_ONCE(!holder))
 514		return -EINVAL;
 515retry:
 516	mutex_lock(&bdev_lock);
 517	/* if someone else claimed, fail */
 518	if (!bd_may_claim(bdev, holder, hops)) {
 519		mutex_unlock(&bdev_lock);
 520		return -EBUSY;
 521	}
 522
 523	/* if claiming is already in progress, wait for it to finish */
 524	if (whole->bd_claiming) {
 525		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
 526		DEFINE_WAIT(wait);
 527
 528		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
 529		mutex_unlock(&bdev_lock);
 530		schedule();
 531		finish_wait(wq, &wait);
 532		goto retry;
 533	}
 534
 535	/* yay, all mine */
 536	whole->bd_claiming = holder;
 537	mutex_unlock(&bdev_lock);
 538	return 0;
 539}
 540EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
 541
 542static void bd_clear_claiming(struct block_device *whole, void *holder)
 543{
 544	lockdep_assert_held(&bdev_lock);
 545	/* tell others that we're done */
 546	BUG_ON(whole->bd_claiming != holder);
 547	whole->bd_claiming = NULL;
 548	wake_up_bit(&whole->bd_claiming, 0);
 549}
 550
 551/**
 552 * bd_finish_claiming - finish claiming of a block device
 553 * @bdev: block device of interest
 554 * @holder: holder that has claimed @bdev
 555 * @hops: block device holder operations
 556 *
 557 * Finish exclusive open of a block device. Mark the device as exclusively
 558 * open by the holder and wake up all waiters for exclusive open to finish.
 559 */
 560static void bd_finish_claiming(struct block_device *bdev, void *holder,
 561		const struct blk_holder_ops *hops)
 562{
 563	struct block_device *whole = bdev_whole(bdev);
 564
 565	mutex_lock(&bdev_lock);
 566	BUG_ON(!bd_may_claim(bdev, holder, hops));
 567	/*
 568	 * Note that for a whole device bd_holders will be incremented twice,
 569	 * and bd_holder will be set to bd_may_claim before being set to holder
 570	 */
 571	whole->bd_holders++;
 572	whole->bd_holder = bd_may_claim;
 573	bdev->bd_holders++;
 574	mutex_lock(&bdev->bd_holder_lock);
 575	bdev->bd_holder = holder;
 576	bdev->bd_holder_ops = hops;
 577	mutex_unlock(&bdev->bd_holder_lock);
 578	bd_clear_claiming(whole, holder);
 579	mutex_unlock(&bdev_lock);
 580}
 581
 582/**
 583 * bd_abort_claiming - abort claiming of a block device
 584 * @bdev: block device of interest
 585 * @holder: holder that has claimed @bdev
 586 *
 587 * Abort claiming of a block device when the exclusive open failed. This can
 588 * also be used when an exclusive open is not actually desired and we just needed
 589 * to block other exclusive openers for a while.
 590 */
 591void bd_abort_claiming(struct block_device *bdev, void *holder)
 592{
 593	mutex_lock(&bdev_lock);
 594	bd_clear_claiming(bdev_whole(bdev), holder);
 595	mutex_unlock(&bdev_lock);
 596}
 597EXPORT_SYMBOL(bd_abort_claiming);
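
/*
 * Editorial example, not part of bdev.c: the temporary-claim pattern that the
 * bd_abort_claiming() comment describes - block other exclusive openers for
 * the duration of an operation without ever finishing an exclusive open.
 * truncate_bdev_range() above uses the same idiom; "example_exclusive_section"
 * is hypothetical.
 */
#if 0	/* illustration only */
static int example_exclusive_section(struct block_device *bdev)
{
	int error;

	error = bd_prepare_to_claim(bdev, example_exclusive_section, NULL);
	if (error)
		return error;	/* someone else already holds the device exclusively */

	/* ... do work that must not race with a new exclusive open ... */

	bd_abort_claiming(bdev, example_exclusive_section);
	return 0;
}
#endif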
 598
 599static void bd_end_claim(struct block_device *bdev, void *holder)
 600{
 601	struct block_device *whole = bdev_whole(bdev);
 602	bool unblock = false;
 603
 604	/*
 605	 * Release a claim on the device.  The holder fields are protected with
 606	 * bdev_lock.  open_mutex is used to synchronize disk_holder unlinking.
 607	 */
 608	mutex_lock(&bdev_lock);
 609	WARN_ON_ONCE(bdev->bd_holder != holder);
 610	WARN_ON_ONCE(--bdev->bd_holders < 0);
 611	WARN_ON_ONCE(--whole->bd_holders < 0);
 612	if (!bdev->bd_holders) {
 613		mutex_lock(&bdev->bd_holder_lock);
 614		bdev->bd_holder = NULL;
 615		bdev->bd_holder_ops = NULL;
 616		mutex_unlock(&bdev->bd_holder_lock);
 617		if (bdev->bd_write_holder)
 618			unblock = true;
 619	}
 620	if (!whole->bd_holders)
 621		whole->bd_holder = NULL;
 622	mutex_unlock(&bdev_lock);
 623
 624	/*
 625	 * If this was the last claim, remove the holder link and unblock event
 626	 * polling if it was a write holder.
 627	 */
 628	if (unblock) {
 629		disk_unblock_events(bdev->bd_disk);
 630		bdev->bd_write_holder = false;
 631	}
 632}
 633
 634static void blkdev_flush_mapping(struct block_device *bdev)
 635{
 636	WARN_ON_ONCE(bdev->bd_holders);
 637	sync_blockdev(bdev);
 638	kill_bdev(bdev);
 639	bdev_write_inode(bdev);
 640}
 641
 642static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
 643{
 644	struct gendisk *disk = bdev->bd_disk;
 645	int ret;
 646
 647	if (disk->fops->open) {
 648		ret = disk->fops->open(disk, mode);
 649		if (ret) {
 650			/* avoid ghost partitions on a removed medium */
 651			if (ret == -ENOMEDIUM &&
 652			     test_bit(GD_NEED_PART_SCAN, &disk->state))
 653				bdev_disk_changed(disk, true);
 654			return ret;
 655		}
 656	}
 657
 658	if (!atomic_read(&bdev->bd_openers))
 659		set_init_blocksize(bdev);
 660	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
 661		bdev_disk_changed(disk, false);
 662	atomic_inc(&bdev->bd_openers);
 663	return 0;
 664}
 665
 666static void blkdev_put_whole(struct block_device *bdev)
 667{
 668	if (atomic_dec_and_test(&bdev->bd_openers))
 669		blkdev_flush_mapping(bdev);
 670	if (bdev->bd_disk->fops->release)
 671		bdev->bd_disk->fops->release(bdev->bd_disk);
 672}
 673
 674static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
 675{
 676	struct gendisk *disk = part->bd_disk;
 677	int ret;
 678
 679	ret = blkdev_get_whole(bdev_whole(part), mode);
 680	if (ret)
 681		return ret;
 682
 683	ret = -ENXIO;
 684	if (!bdev_nr_sectors(part))
 685		goto out_blkdev_put;
 686
 687	if (!atomic_read(&part->bd_openers)) {
 688		disk->open_partitions++;
 689		set_init_blocksize(part);
 690	}
 691	atomic_inc(&part->bd_openers);
 692	return 0;
 693
 694out_blkdev_put:
 695	blkdev_put_whole(bdev_whole(part));
 696	return ret;
 697}
 698
 699static void blkdev_put_part(struct block_device *part)
 700{
 701	struct block_device *whole = bdev_whole(part);
 702
 703	if (atomic_dec_and_test(&part->bd_openers)) {
 704		blkdev_flush_mapping(part);
 705		whole->bd_disk->open_partitions--;
 706	}
 707	blkdev_put_whole(whole);
 708}
 709
 710struct block_device *blkdev_get_no_open(dev_t dev)
 711{
 712	struct block_device *bdev;
 713	struct inode *inode;
 714
 715	inode = ilookup(blockdev_superblock, dev);
 716	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
 717		blk_request_module(dev);
 718		inode = ilookup(blockdev_superblock, dev);
 719		if (inode)
 720			pr_warn_ratelimited(
 721"block device autoloading is deprecated and will be removed.\n");
 722	}
 723	if (!inode)
 724		return NULL;
 725
 726	/* switch from the inode reference to a device model one: */
 727	bdev = &BDEV_I(inode)->bdev;
 728	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
 729		bdev = NULL;
 730	iput(inode);
 731	return bdev;
 732}
 733
 734void blkdev_put_no_open(struct block_device *bdev)
 735{
 736	put_device(&bdev->bd_device);
 737}
 738
 739static bool bdev_writes_blocked(struct block_device *bdev)
 740{
 741	return bdev->bd_writers == -1;
 742}
 743
 744static void bdev_block_writes(struct block_device *bdev)
 745{
 746	bdev->bd_writers = -1;
 747}
 748
 749static void bdev_unblock_writes(struct block_device *bdev)
 750{
 751	bdev->bd_writers = 0;
 752}
 753
 754static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
 755{
 756	if (bdev_allow_write_mounted)
 757		return true;
 758	/* Writes blocked? */
 759	if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev))
 760		return false;
 761	if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0)
 762		return false;
 763	return true;
 764}
 765
 766static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
 767{
 768	if (bdev_allow_write_mounted)
 769		return;
 770
 771	/* Claim exclusive or shared write access. */
 772	if (mode & BLK_OPEN_RESTRICT_WRITES)
 773		bdev_block_writes(bdev);
 774	else if (mode & BLK_OPEN_WRITE)
 775		bdev->bd_writers++;
 776}
 777
 778static void bdev_yield_write_access(struct block_device *bdev, blk_mode_t mode)
 779{
 780	if (bdev_allow_write_mounted)
 781		return;
 782
 783	/* Yield exclusive or shared write access. */
 784	if (mode & BLK_OPEN_RESTRICT_WRITES)
 785		bdev_unblock_writes(bdev);
 786	else if (mode & BLK_OPEN_WRITE)
 787		bdev->bd_writers--;
 788}
 789
 790/**
 791 * bdev_open_by_dev - open a block device by device number
 792 * @dev: device number of block device to open
 793 * @mode: open mode (BLK_OPEN_*)
 794 * @holder: exclusive holder identifier
 795 * @hops: holder operations
 796 *
 797 * Open the block device described by device number @dev. If @holder is not
 798 * %NULL, the block device is opened with exclusive access.  Exclusive opens may
 799 * nest for the same @holder.
 800 *
 801 * Use this interface ONLY if you really do not have anything better - i.e. when
 802 * you are behind a truly sucky interface and all you are given is a device
 803 * number.  Everything else should use bdev_open_by_path().
 804 *
 805 * CONTEXT:
 806 * Might sleep.
 807 *
 808 * RETURNS:
 809 * Handle with a reference to the block_device on success, ERR_PTR(-errno) on
 810 * failure.
 811 */
 812struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
 813				     const struct blk_holder_ops *hops)
 814{
 815	struct bdev_handle *handle = kmalloc(sizeof(struct bdev_handle),
 816					     GFP_KERNEL);
 817	struct block_device *bdev;
 818	bool unblock_events = true;
 819	struct gendisk *disk;
 820	int ret;
 821
 822	if (!handle)
 823		return ERR_PTR(-ENOMEM);
 824
 825	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
 826			MAJOR(dev), MINOR(dev),
 827			((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
 828			((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
 829	if (ret)
 830		goto free_handle;
 831
 832	/* Blocking writes requires an exclusive opener */
 833	if (mode & BLK_OPEN_RESTRICT_WRITES && !holder) {
 834		ret = -EINVAL;
 835		goto free_handle;
 836	}
 837
 838	bdev = blkdev_get_no_open(dev);
 839	if (!bdev) {
 840		ret = -ENXIO;
 841		goto free_handle;
 842	}
 843	disk = bdev->bd_disk;
 844
 845	if (holder) {
 846		mode |= BLK_OPEN_EXCL;
 847		ret = bd_prepare_to_claim(bdev, holder, hops);
 848		if (ret)
 849			goto put_blkdev;
 850	} else {
 851		if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) {
 852			ret = -EIO;
 853			goto put_blkdev;
 854		}
 855	}
 856
 857	disk_block_events(disk);
 858
 859	mutex_lock(&disk->open_mutex);
 860	ret = -ENXIO;
 861	if (!disk_live(disk))
 862		goto abort_claiming;
 863	if (!try_module_get(disk->fops->owner))
 864		goto abort_claiming;
 865	ret = -EBUSY;
 866	if (!bdev_may_open(bdev, mode))
 867		goto abort_claiming;
 868	if (bdev_is_partition(bdev))
 869		ret = blkdev_get_part(bdev, mode);
 870	else
 871		ret = blkdev_get_whole(bdev, mode);
 872	if (ret)
 873		goto put_module;
 874	bdev_claim_write_access(bdev, mode);
 875	if (holder) {
 876		bd_finish_claiming(bdev, holder, hops);
 877
 878		/*
 879		 * Block event polling for write claims if requested.  Any write
 880		 * holder makes the write_holder state stick until all are
 881		 * released.  This is good enough and tracking individual
 882		 * writeable references is too fragile given the way @mode is
 883		 * used in blkdev_get/put().
 884		 */
 885		if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
 886		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
 887			bdev->bd_write_holder = true;
 888			unblock_events = false;
 889		}
 890	}
 891	mutex_unlock(&disk->open_mutex);
 892
 893	if (unblock_events)
 894		disk_unblock_events(disk);
 895	handle->bdev = bdev;
 896	handle->holder = holder;
 897	handle->mode = mode;
 898	return handle;
 899put_module:
 900	module_put(disk->fops->owner);
 901abort_claiming:
 902	if (holder)
 903		bd_abort_claiming(bdev, holder);
 904	mutex_unlock(&disk->open_mutex);
 905	disk_unblock_events(disk);
 906put_blkdev:
 907	blkdev_put_no_open(bdev);
 908free_handle:
 909	kfree(handle);
 910	return ERR_PTR(ret);
 911}
 912EXPORT_SYMBOL(bdev_open_by_dev);
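
/*
 * Editorial example, not part of bdev.c: a plain, non-exclusive open by
 * device number as documented above.  "example_open_by_dev" is hypothetical;
 * bdev_open_by_dev() and bdev_release() are the real interfaces.
 */
#if 0	/* illustration only */
static int example_open_by_dev(dev_t dev)
{
	struct bdev_handle *handle;

	/* No holder and no holder ops: a shared, read-only open. */
	handle = bdev_open_by_dev(dev, BLK_OPEN_READ, NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... submit I/O against handle->bdev ... */

	bdev_release(handle);
	return 0;
}
#endif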
 913
 914/**
 915 * bdev_open_by_path - open a block device by name
 916 * @path: path to the block device to open
 917 * @mode: open mode (BLK_OPEN_*)
 918 * @holder: exclusive holder identifier
 919 * @hops: holder operations
 920 *
 921 * Open the block device described by the device file at @path.  If @holder is
 922 * not %NULL, the block device is opened with exclusive access.  Exclusive opens
 923 * may nest for the same @holder.
 924 *
 925 * CONTEXT:
 926 * Might sleep.
 927 *
 928 * RETURNS:
 929 * Handle with a reference to the block_device on success, ERR_PTR(-errno) on
 930 * failure.
 931 */
 932struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
 933		void *holder, const struct blk_holder_ops *hops)
 934{
 935	struct bdev_handle *handle;
 936	dev_t dev;
 937	int error;
 938
 939	error = lookup_bdev(path, &dev);
 940	if (error)
 941		return ERR_PTR(error);
 942
 943	handle = bdev_open_by_dev(dev, mode, holder, hops);
 944	if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
 945	    bdev_read_only(handle->bdev)) {
 946		bdev_release(handle);
 947		return ERR_PTR(-EACCES);
 948	}
 949
 950	return handle;
 951}
 952EXPORT_SYMBOL(bdev_open_by_path);
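
/*
 * Editorial example, not part of bdev.c: an exclusive open by path, roughly
 * what a filesystem or stacking driver does to claim its backing device.  The
 * "example_claim_by_path" wrapper and its holder cookie are hypothetical; the
 * returned handle must later be passed to bdev_release().
 */
#if 0	/* illustration only */
static struct bdev_handle *example_claim_by_path(const char *path, void *holder)
{
	/*
	 * A non-NULL holder makes the open exclusive; further exclusive
	 * opens succeed only when they present the same holder cookie.
	 */
	return bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				 holder, NULL);
}
#endif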
 953
 954void bdev_release(struct bdev_handle *handle)
 955{
 956	struct block_device *bdev = handle->bdev;
 957	struct gendisk *disk = bdev->bd_disk;
 958
 959	/*
 960	 * Sync early if it looks like we're the last one.  If someone else
 961	 * opens the block device between now and the decrement of bd_openers
 962	 * then we did a sync that we didn't need to, but that's not the end
 963	 * of the world and we want to avoid long (could be several minutes)
 964	 * syncs while holding the mutex.
 965	 */
 966	if (atomic_read(&bdev->bd_openers) == 1)
 967		sync_blockdev(bdev);
 968
 969	mutex_lock(&disk->open_mutex);
 970	bdev_yield_write_access(bdev, handle->mode);
 971
 972	if (handle->holder)
 973		bd_end_claim(bdev, handle->holder);
 974
 975	/*
 976	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
 977	 * event.  This is to ensure detection of media removal commanded
 978	 * from userland - e.g. eject(1).
 979	 */
 980	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);
 981
 982	if (bdev_is_partition(bdev))
 983		blkdev_put_part(bdev);
 984	else
 985		blkdev_put_whole(bdev);
 986	mutex_unlock(&disk->open_mutex);
 987
 988	module_put(disk->fops->owner);
 989	blkdev_put_no_open(bdev);
 990	kfree(handle);
 991}
 992EXPORT_SYMBOL(bdev_release);
 993
 994/**
 995 * lookup_bdev() - Look up a struct block_device by name.
 996 * @pathname: Name of the block device in the filesystem.
 997 * @dev: Pointer to the block device's dev_t, if found.
 998 *
 999 * Lookup the block device's dev_t at @pathname in the current
1000 * namespace if possible and return it in @dev.
1001 *
1002 * Context: May sleep.
1003 * Return: 0 if succeeded, negative errno otherwise.
1004 */
1005int lookup_bdev(const char *pathname, dev_t *dev)
1006{
1007	struct inode *inode;
1008	struct path path;
1009	int error;
1010
1011	if (!pathname || !*pathname)
1012		return -EINVAL;
1013
1014	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
1015	if (error)
1016		return error;
1017
1018	inode = d_backing_inode(path.dentry);
1019	error = -ENOTBLK;
1020	if (!S_ISBLK(inode->i_mode))
1021		goto out_path_put;
1022	error = -EACCES;
1023	if (!may_open_dev(&path))
1024		goto out_path_put;
1025
1026	*dev = inode->i_rdev;
1027	error = 0;
1028out_path_put:
1029	path_put(&path);
1030	return error;
1031}
1032EXPORT_SYMBOL(lookup_bdev);
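
/*
 * Editorial example, not part of bdev.c: resolving a path to a dev_t with
 * lookup_bdev() and then opening the device by number, which is essentially
 * what bdev_open_by_path() does internally.  "example_resolve_and_open" is
 * hypothetical.
 */
#if 0	/* illustration only */
static struct bdev_handle *example_resolve_and_open(const char *path)
{
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	return bdev_open_by_dev(dev, BLK_OPEN_READ, NULL, NULL);
}
#endif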
1033
1034/**
1035 * bdev_mark_dead - mark a block device as dead
1036 * @bdev: block device to operate on
1037 * @surprise: indicate a surprise removal
1038 *
1039 * Tell the file system that this device or media is dead.  If @surprise is set
1040 * to %true the device or media is already gone, if not we are preparing for an
1041 * orderly removal.
1042 *
1043 * This calls into the file system, which then typically syncs out all dirty data
1044 * and writes back inodes and then invalidates any cached data in the inodes on
1045 * the file system.  In addition we also invalidate the block device mapping.
1046 */
1047void bdev_mark_dead(struct block_device *bdev, bool surprise)
1048{
1049	mutex_lock(&bdev->bd_holder_lock);
1050	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
1051		bdev->bd_holder_ops->mark_dead(bdev, surprise);
1052	else {
1053		mutex_unlock(&bdev->bd_holder_lock);
1054		sync_blockdev(bdev);
1055	}
1056
1057	invalidate_bdev(bdev);
1058}
1059/*
1060 * New drivers should not use this directly.  There are, however, some drivers
1061 * that need this for historical reasons. For example, the DASD driver has
1062 * historically had a shutdown to offline mode that doesn't actually remove the
1063 * gendisk that otherwise looks a lot like a safe device removal.
1064 */
1065EXPORT_SYMBOL_GPL(bdev_mark_dead);
1066
1067void sync_bdevs(bool wait)
1068{
1069	struct inode *inode, *old_inode = NULL;
1070
1071	spin_lock(&blockdev_superblock->s_inode_list_lock);
1072	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
1073		struct address_space *mapping = inode->i_mapping;
1074		struct block_device *bdev;
1075
1076		spin_lock(&inode->i_lock);
1077		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
1078		    mapping->nrpages == 0) {
1079			spin_unlock(&inode->i_lock);
1080			continue;
1081		}
1082		__iget(inode);
1083		spin_unlock(&inode->i_lock);
1084		spin_unlock(&blockdev_superblock->s_inode_list_lock);
1085		/*
1086		 * We hold a reference to 'inode' so it couldn't have been
1087		 * removed from the s_inodes list while we dropped the
1088		 * s_inode_list_lock.  We cannot iput the inode now as we can
1089		 * be holding the last reference and we cannot iput it under
1090		 * s_inode_list_lock. So we keep the reference and iput it
1091		 * later.
1092		 */
1093		iput(old_inode);
1094		old_inode = inode;
1095		bdev = I_BDEV(inode);
1096
1097		mutex_lock(&bdev->bd_disk->open_mutex);
1098		if (!atomic_read(&bdev->bd_openers)) {
1099			; /* skip */
1100		} else if (wait) {
1101			/*
1102			 * We keep the error status of individual mapping so
1103			 * that applications can catch the writeback error using
1104			 * fsync(2). See filemap_fdatawait_keep_errors() for
1105			 * details.
1106			 */
1107			filemap_fdatawait_keep_errors(inode->i_mapping);
1108		} else {
1109			filemap_fdatawrite(inode->i_mapping);
1110		}
1111		mutex_unlock(&bdev->bd_disk->open_mutex);
1112
1113		spin_lock(&blockdev_superblock->s_inode_list_lock);
1114	}
1115	spin_unlock(&blockdev_superblock->s_inode_list_lock);
1116	iput(old_inode);
1117}
1118
1119/*
1120 * Handle STATX_DIOALIGN for block devices.
1121 *
1122 * Note that the inode passed to this is the inode of a block device node file,
1123 * not the block device's internal inode.  Therefore it is *not* valid to use
1124 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
1125 */
1126void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
1127{
1128	struct block_device *bdev;
1129
1130	bdev = blkdev_get_no_open(inode->i_rdev);
1131	if (!bdev)
1132		return;
1133
1134	stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
1135	stat->dio_offset_align = bdev_logical_block_size(bdev);
1136	stat->result_mask |= STATX_DIOALIGN;
1137
1138	blkdev_put_no_open(bdev);
1139}
1140
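/*
 * Editorial example, not part of bdev.c: what the STATX_DIOALIGN plumbing
 * above looks like from userspace.  A sketch of a C program querying the
 * direct-I/O alignment of a block device node; "/dev/sda" is a placeholder
 * and the headers must be new enough to define STATX_DIOALIGN.
 */
#if 0	/* illustration only - userspace code */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/dev/sda", 0, STATX_DIOALIGN, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align: %u, dio offset align: %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	return 0;
}
#endif
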
1141static int __init setup_bdev_allow_write_mounted(char *str)
1142{
1143	if (kstrtobool(str, &bdev_allow_write_mounted))
1144		pr_warn("Invalid option string for bdev_allow_write_mounted:"
1145			" '%s'\n", str);
1146	return 1;
1147}
1148__setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);
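
/*
 * Editorial note, not part of bdev.c: the parameter registered above is set
 * on the kernel command line, e.g.
 *
 *	bdev_allow_write_mounted=0
 *
 * which blocks writes to block devices that are exclusively opened with
 * BLK_OPEN_RESTRICT_WRITES (such as by a mounted filesystem), regardless of
 * the CONFIG_BLK_DEV_WRITE_MOUNTED build-time default.
 */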