   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/fs/buffer.c
   4 *
   5 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
   6 */
   7
   8/*
   9 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
  10 *
  11 * Removed a lot of unnecessary code and simplified things now that
  12 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
  13 *
  14 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
  15 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
  16 *
  17 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
  18 *
  19 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
  20 */
  21
  22#include <linux/kernel.h>
  23#include <linux/sched/signal.h>
  24#include <linux/syscalls.h>
  25#include <linux/fs.h>
  26#include <linux/iomap.h>
  27#include <linux/mm.h>
  28#include <linux/percpu.h>
  29#include <linux/slab.h>
  30#include <linux/capability.h>
  31#include <linux/blkdev.h>
  32#include <linux/file.h>
  33#include <linux/quotaops.h>
  34#include <linux/highmem.h>
  35#include <linux/export.h>
  36#include <linux/backing-dev.h>
  37#include <linux/writeback.h>
  38#include <linux/hash.h>
  39#include <linux/suspend.h>
  40#include <linux/buffer_head.h>
  41#include <linux/task_io_accounting_ops.h>
  42#include <linux/bio.h>
  43#include <linux/cpu.h>
  44#include <linux/bitops.h>
  45#include <linux/mpage.h>
  46#include <linux/bit_spinlock.h>
  47#include <linux/pagevec.h>
  48#include <linux/sched/mm.h>
  49#include <trace/events/block.h>
  50#include <linux/fscrypt.h>
  51#include <linux/fsverity.h>
  52#include <linux/sched/isolation.h>
  53
  54#include "internal.h"
  55
  56static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
  57static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
  58			  enum rw_hint hint, struct writeback_control *wbc);
  59
  60#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
  61
  62inline void touch_buffer(struct buffer_head *bh)
  63{
  64	trace_block_touch_buffer(bh);
  65	folio_mark_accessed(bh->b_folio);
  66}
  67EXPORT_SYMBOL(touch_buffer);
  68
  69void __lock_buffer(struct buffer_head *bh)
  70{
  71	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
  72}
  73EXPORT_SYMBOL(__lock_buffer);
  74
  75void unlock_buffer(struct buffer_head *bh)
  76{
  77	clear_bit_unlock(BH_Lock, &bh->b_state);
  78	smp_mb__after_atomic();
  79	wake_up_bit(&bh->b_state, BH_Lock);
  80}
  81EXPORT_SYMBOL(unlock_buffer);
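
/*
 * A minimal sketch (not from the original file) of the pattern the two
 * helpers above support: hold the buffer lock around a modification so
 * the buffer cannot be under I/O while its contents change.  The
 * function name is hypothetical, and len is assumed to be <= bh->b_size.
 */
static void example_update_block(struct buffer_head *bh,
				 const void *data, size_t len)
{
	lock_buffer(bh);		/* may sleep in __lock_buffer() */
	memcpy(bh->b_data, data, len);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);		/* wakes waiters in __wait_on_buffer() */
}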
  82
  83/*
  84 * Returns whether the folio has dirty or writeback buffers. If all the buffers
  85 * are unlocked and clean then the folio_test_dirty information is stale. If
  86 * any of the buffers are locked, it is assumed they are locked for IO.
  87 */
  88void buffer_check_dirty_writeback(struct folio *folio,
  89				     bool *dirty, bool *writeback)
  90{
  91	struct buffer_head *head, *bh;
  92	*dirty = false;
  93	*writeback = false;
  94
  95	BUG_ON(!folio_test_locked(folio));
  96
  97	head = folio_buffers(folio);
  98	if (!head)
  99		return;
 100
 101	if (folio_test_writeback(folio))
 102		*writeback = true;
 103
 104	bh = head;
 105	do {
 106		if (buffer_locked(bh))
 107			*writeback = true;
 108
 109		if (buffer_dirty(bh))
 110			*dirty = true;
 111
 112		bh = bh->b_this_page;
 113	} while (bh != head);
 114}
 115
 116/*
 117 * Block until a buffer comes unlocked.  This doesn't stop it
 118 * from becoming locked again - you have to lock it yourself
 119 * if you want to preserve its state.
 120 */
 121void __wait_on_buffer(struct buffer_head * bh)
 122{
 123	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
 124}
 125EXPORT_SYMBOL(__wait_on_buffer);
 126
 127static void buffer_io_error(struct buffer_head *bh, char *msg)
 128{
 129	if (!test_bit(BH_Quiet, &bh->b_state))
 130		printk_ratelimited(KERN_ERR
 131			"Buffer I/O error on dev %pg, logical block %llu%s\n",
 132			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
 133}
 134
 135/*
 136 * End-of-IO handler helper function which does not touch the bh after
 137 * unlocking it.
 138 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 139 * a race there is benign: unlock_buffer() only uses the bh's address for
 140 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 141 * itself.
 142 */
 143static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 144{
 145	if (uptodate) {
 146		set_buffer_uptodate(bh);
 147	} else {
 148		/* This happens, due to failed read-ahead attempts. */
 149		clear_buffer_uptodate(bh);
 150	}
 151	unlock_buffer(bh);
 152}
 153
 154/*
 155 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 156 * unlock the buffer.
 157 */
 158void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 159{
 160	__end_buffer_read_notouch(bh, uptodate);
 161	put_bh(bh);
 162}
 163EXPORT_SYMBOL(end_buffer_read_sync);
 164
 165void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 166{
 167	if (uptodate) {
 168		set_buffer_uptodate(bh);
 169	} else {
 170		buffer_io_error(bh, ", lost sync page write");
 171		mark_buffer_write_io_error(bh);
 172		clear_buffer_uptodate(bh);
 173	}
 174	unlock_buffer(bh);
 175	put_bh(bh);
 176}
 177EXPORT_SYMBOL(end_buffer_write_sync);
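
/*
 * A minimal sketch of pairing end_buffer_write_sync() with submit_bh()
 * to write one dirty buffer synchronously; __sync_dirty_buffer(),
 * later in this file, follows the same pattern.  The function name is
 * hypothetical.
 */
static int example_write_buffer_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return 0;		/* nothing to write */
	}
	get_bh(bh);			/* dropped by end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}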
 178
 179/*
 180 * Various filesystems appear to want __find_get_block to be non-blocking.
 181 * But it's the page lock which protects the buffers.  To get around this,
 182 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 183 * i_private_lock.
 184 *
 185 * Hack idea: for the blockdev mapping, i_private_lock contention
 186 * may be quite high.  This code could TryLock the page, and if that
 187 * succeeds, there is no need to take i_private_lock.
 188 */
 189static struct buffer_head *
 190__find_get_block_slow(struct block_device *bdev, sector_t block)
 191{
 192	struct address_space *bd_mapping = bdev->bd_mapping;
 193	const int blkbits = bd_mapping->host->i_blkbits;
 194	struct buffer_head *ret = NULL;
 195	pgoff_t index;
 196	struct buffer_head *bh;
 197	struct buffer_head *head;
 198	struct folio *folio;
 199	int all_mapped = 1;
 200	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 201
 202	index = ((loff_t)block << blkbits) / PAGE_SIZE;
 203	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
 204	if (IS_ERR(folio))
 205		goto out;
 206
 207	spin_lock(&bd_mapping->i_private_lock);
 208	head = folio_buffers(folio);
 209	if (!head)
 210		goto out_unlock;
 211	bh = head;
 212	do {
 213		if (!buffer_mapped(bh))
 214			all_mapped = 0;
 215		else if (bh->b_blocknr == block) {
 216			ret = bh;
 217			get_bh(bh);
 218			goto out_unlock;
 219		}
 220		bh = bh->b_this_page;
 221	} while (bh != head);
 222
 223	/* we might be here because some of the buffers on this page are
 224	 * not mapped.  This is due to various races between
 225	 * file io on the block device and getblk.  It gets dealt with
 226	 * elsewhere, don't buffer_error if we had some unmapped buffers
 227	 */
 228	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
 229	if (all_mapped && __ratelimit(&last_warned)) {
 230		printk("__find_get_block_slow() failed. block=%llu, "
 231		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
 232		       "device %pg blocksize: %d\n",
 233		       (unsigned long long)block,
 234		       (unsigned long long)bh->b_blocknr,
 235		       bh->b_state, bh->b_size, bdev,
 236		       1 << blkbits);
 237	}
 238out_unlock:
 239	spin_unlock(&bd_mapping->i_private_lock);
 240	folio_put(folio);
 241out:
 242	return ret;
 243}
 244
 245static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 246{
 247	unsigned long flags;
 248	struct buffer_head *first;
 249	struct buffer_head *tmp;
 250	struct folio *folio;
 251	int folio_uptodate = 1;
 252
 253	BUG_ON(!buffer_async_read(bh));
 254
 255	folio = bh->b_folio;
 256	if (uptodate) {
 257		set_buffer_uptodate(bh);
 258	} else {
 259		clear_buffer_uptodate(bh);
 260		buffer_io_error(bh, ", async page read");
 261	}
 262
 263	/*
 264	 * Be _very_ careful from here on. Bad things can happen if
 265	 * two buffer heads end IO at almost the same time and both
 266	 * decide that the page is now completely done.
 267	 */
 268	first = folio_buffers(folio);
 269	spin_lock_irqsave(&first->b_uptodate_lock, flags);
 270	clear_buffer_async_read(bh);
 271	unlock_buffer(bh);
 272	tmp = bh;
 273	do {
 274		if (!buffer_uptodate(tmp))
 275			folio_uptodate = 0;
 276		if (buffer_async_read(tmp)) {
 277			BUG_ON(!buffer_locked(tmp));
 278			goto still_busy;
 279		}
 280		tmp = tmp->b_this_page;
 281	} while (tmp != bh);
 282	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 283
 284	folio_end_read(folio, folio_uptodate);
 285	return;
 286
 287still_busy:
 288	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 289	return;
 290}
 291
 292struct postprocess_bh_ctx {
 293	struct work_struct work;
 294	struct buffer_head *bh;
 295};
 296
 297static void verify_bh(struct work_struct *work)
 298{
 299	struct postprocess_bh_ctx *ctx =
 300		container_of(work, struct postprocess_bh_ctx, work);
 301	struct buffer_head *bh = ctx->bh;
 302	bool valid;
 303
 304	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
 305	end_buffer_async_read(bh, valid);
 306	kfree(ctx);
 307}
 308
 309static bool need_fsverity(struct buffer_head *bh)
 310{
 311	struct folio *folio = bh->b_folio;
 312	struct inode *inode = folio->mapping->host;
 313
 314	return fsverity_active(inode) &&
 315		/* needed by ext4 */
 316		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
 317}
 318
 319static void decrypt_bh(struct work_struct *work)
 320{
 321	struct postprocess_bh_ctx *ctx =
 322		container_of(work, struct postprocess_bh_ctx, work);
 323	struct buffer_head *bh = ctx->bh;
 324	int err;
 325
 326	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
 327					       bh_offset(bh));
 328	if (err == 0 && need_fsverity(bh)) {
 329		/*
 330		 * We use different work queues for decryption and for verity
 331		 * because verity may require reading metadata pages that need
 332		 * decryption, and we shouldn't recurse to the same workqueue.
 333		 */
 334		INIT_WORK(&ctx->work, verify_bh);
 335		fsverity_enqueue_verify_work(&ctx->work);
 336		return;
 337	}
 338	end_buffer_async_read(bh, err == 0);
 339	kfree(ctx);
 340}
 341
 342/*
 343 * I/O completion handler for block_read_full_folio() - pages
 344 * which come unlocked at the end of I/O.
 345 */
 346static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 347{
 348	struct inode *inode = bh->b_folio->mapping->host;
 349	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
 350	bool verify = need_fsverity(bh);
 351
 352	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
 353	if (uptodate && (decrypt || verify)) {
 354		struct postprocess_bh_ctx *ctx =
 355			kmalloc(sizeof(*ctx), GFP_ATOMIC);
 356
 357		if (ctx) {
 358			ctx->bh = bh;
 359			if (decrypt) {
 360				INIT_WORK(&ctx->work, decrypt_bh);
 361				fscrypt_enqueue_decrypt_work(&ctx->work);
 362			} else {
 363				INIT_WORK(&ctx->work, verify_bh);
 364				fsverity_enqueue_verify_work(&ctx->work);
 365			}
 366			return;
 367		}
 368		uptodate = 0;
 369	}
 370	end_buffer_async_read(bh, uptodate);
 371}
 372
 373/*
 374 * Completion handler for block_write_full_folio() - folios which are unlocked
 375 * during I/O, and which have the writeback flag cleared upon I/O completion.
 376 */
 377static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 378{
 379	unsigned long flags;
 380	struct buffer_head *first;
 381	struct buffer_head *tmp;
 382	struct folio *folio;
 383
 384	BUG_ON(!buffer_async_write(bh));
 385
 386	folio = bh->b_folio;
 387	if (uptodate) {
 388		set_buffer_uptodate(bh);
 389	} else {
 390		buffer_io_error(bh, ", lost async page write");
 391		mark_buffer_write_io_error(bh);
 392		clear_buffer_uptodate(bh);
 393	}
 394
 395	first = folio_buffers(folio);
 396	spin_lock_irqsave(&first->b_uptodate_lock, flags);
 397
 398	clear_buffer_async_write(bh);
 399	unlock_buffer(bh);
 400	tmp = bh->b_this_page;
 401	while (tmp != bh) {
 402		if (buffer_async_write(tmp)) {
 403			BUG_ON(!buffer_locked(tmp));
 404			goto still_busy;
 405		}
 406		tmp = tmp->b_this_page;
 407	}
 408	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 409	folio_end_writeback(folio);
 410	return;
 411
 412still_busy:
 413	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 414	return;
 415}
 416
 417/*
 418 * If a page's buffers are under async read-in (end_buffer_async_read
 419 * completion) then there is a possibility that another thread of
 420 * control could lock one of the buffers after it has completed
 421 * but while some of the other buffers have not completed.  This
 422 * locked buffer would confuse end_buffer_async_read() into not unlocking
 423 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 424 * that this buffer is not under async I/O.
 425 *
 426 * The page comes unlocked when it has no locked buffer_async buffers
 427 * left.
 428 *
 429 * PageLocked prevents anyone starting new async I/O reads any of
 430 * the buffers.
 431 *
 432 * PageWriteback is used to prevent simultaneous writeout of the same
 433 * page.
 434 *
 435 * PageLocked prevents anyone from starting writeback of a page which is
 436 * under read I/O (PageWriteback is only ever set against a locked page).
 437 */
 438static void mark_buffer_async_read(struct buffer_head *bh)
 439{
 440	bh->b_end_io = end_buffer_async_read_io;
 441	set_buffer_async_read(bh);
 442}
 443
 444static void mark_buffer_async_write_endio(struct buffer_head *bh,
 445					  bh_end_io_t *handler)
 446{
 447	bh->b_end_io = handler;
 448	set_buffer_async_write(bh);
 449}
 450
 451void mark_buffer_async_write(struct buffer_head *bh)
 452{
 453	mark_buffer_async_write_endio(bh, end_buffer_async_write);
 454}
 455EXPORT_SYMBOL(mark_buffer_async_write);
 456
 457
 458/*
 459 * fs/buffer.c contains helper functions for buffer-backed address space's
 460 * fsync functions.  A common requirement for buffer-based filesystems is
 461 * that certain data from the backing blockdev needs to be written out for
 462 * a successful fsync().  For example, ext2 indirect blocks need to be
 463 * written back and waited upon before fsync() returns.
 464 *
 465 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 466 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 467 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 468 *
 469 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 470 * from their controlling inode's queue when they are being freed.  But
 471 * try_to_free_buffers() will be operating against the *blockdev* mapping
 472 * at the time, not against the S_ISREG file which depends on those buffers.
 473 * So the locking for i_private_list is via the i_private_lock in the address_space
 474 * which backs the buffers.  Which is different from the address_space 
 475 * against which the buffers are listed.  So for a particular address_space,
 476 * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
 477 * mapping->i_private_list will always be protected by the backing blockdev's
 478 * ->i_private_lock.
 479 *
 480 * Which introduces a requirement: all buffers on an address_space's
 481 * ->i_private_list must be from the same address_space: the blockdev's.
 482 *
 483 * address_spaces which do not place buffers at ->i_private_list via these
 484 * utility functions are free to use i_private_lock and i_private_list for
 485 * whatever they want.  The only requirement is that list_empty(i_private_list)
 486 * be true at clear_inode() time.
 487 *
 488 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 489 * filesystems should do that.  invalidate_inode_buffers() should just go
 490 * BUG_ON(!list_empty).
 491 *
 492 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 493 * take an address_space, not an inode.  And it should be called
 494 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 495 * queued up.
 496 *
 497 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 498 * list if it is already on a list.  Because if the buffer is on a list,
 499 * it *must* already be on the right one.  If not, the filesystem is being
 500 * silly.  This will save a ton of locking.  But first we have to ensure
 501 * that buffers are taken *off* the old inode's list when they are freed
 502 * (presumably in truncate).  That requires careful auditing of all
 503 * filesystems (do it inside bforget()).  It could also be done by bringing
 504 * b_inode back.
 505 */
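
/*
 * A minimal sketch of the scheme described above, as a simple
 * filesystem might use it: queue a modified indirect block on the
 * inode's i_private_list with mark_buffer_dirty_inode(), then write
 * the list out at fsync time with sync_mapping_buffers().  The
 * example_ names are hypothetical; generic_buffers_fsync_noflush()
 * below is the full-featured version of the fsync side.
 */
static void example_dirty_indirect(struct inode *inode,
				   struct buffer_head *ind_bh)
{
	/* ... modify ind_bh->b_data here ... */
	mark_buffer_dirty_inode(ind_bh, inode);	/* queues on i_private_list */
}

static int example_fsync_assoc(struct inode *inode)
{
	/* writes out and waits upon inode->i_mapping->i_private_list */
	return sync_mapping_buffers(inode->i_mapping);
}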
 506
 507/*
 508 * The buffer's backing address_space's i_private_lock must be held
 509 */
 510static void __remove_assoc_queue(struct buffer_head *bh)
 511{
 512	list_del_init(&bh->b_assoc_buffers);
 513	WARN_ON(!bh->b_assoc_map);
 514	bh->b_assoc_map = NULL;
 515}
 516
 517int inode_has_buffers(struct inode *inode)
 518{
 519	return !list_empty(&inode->i_data.i_private_list);
 520}
 521
 522/*
 523 * osync is designed to support O_SYNC io.  It waits synchronously for
 524 * all already-submitted IO to complete, but does not queue any new
 525 * writes to the disk.
 526 *
 527 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 528 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 529 * completion.  Any other dirty buffers which are not yet queued for
 530 * write will not be flushed to disk by the osync.
 531 */
 532static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
 533{
 534	struct buffer_head *bh;
 535	struct list_head *p;
 536	int err = 0;
 537
 538	spin_lock(lock);
 539repeat:
 540	list_for_each_prev(p, list) {
 541		bh = BH_ENTRY(p);
 542		if (buffer_locked(bh)) {
 543			get_bh(bh);
 544			spin_unlock(lock);
 545			wait_on_buffer(bh);
 546			if (!buffer_uptodate(bh))
 547				err = -EIO;
 548			brelse(bh);
 549			spin_lock(lock);
 550			goto repeat;
 551		}
 552	}
 553	spin_unlock(lock);
 554	return err;
 555}
 556
 557/**
 558 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 559 * @mapping: the mapping which wants those buffers written
 560 *
 561 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 562 * that I/O.
 563 *
 564 * Basically, this is a convenience function for fsync().
 565 * @mapping is a file or directory which needs those buffers to be written for
 566 * a successful fsync().
 567 */
 568int sync_mapping_buffers(struct address_space *mapping)
 569{
 570	struct address_space *buffer_mapping = mapping->i_private_data;
 571
 572	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
 573		return 0;
 574
 575	return fsync_buffers_list(&buffer_mapping->i_private_lock,
 576					&mapping->i_private_list);
 577}
 578EXPORT_SYMBOL(sync_mapping_buffers);
 579
 580/**
 581 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 582 * for simple filesystems with no inode lock
 583 *
 584 * @file:	file to synchronize
 585 * @start:	start offset in bytes
 586 * @end:	end offset in bytes (inclusive)
 587 * @datasync:	only synchronize essential metadata if true
 588 *
 589 * This is a generic implementation of the fsync method for simple
 590 * filesystems which track all non-inode metadata in the buffers list
 591 * hanging off the address_space structure.
 592 */
 593int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
 594				  bool datasync)
 595{
 596	struct inode *inode = file->f_mapping->host;
 597	int err;
 598	int ret;
 599
 600	err = file_write_and_wait_range(file, start, end);
 601	if (err)
 602		return err;
 603
 604	ret = sync_mapping_buffers(inode->i_mapping);
 605	if (!(inode->i_state & I_DIRTY_ALL))
 606		goto out;
 607	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 608		goto out;
 609
 610	err = sync_inode_metadata(inode, 1);
 611	if (ret == 0)
 612		ret = err;
 613
 614out:
 615	/* check and advance again to catch errors after syncing out buffers */
 616	err = file_check_and_advance_wb_err(file);
 617	if (ret == 0)
 618		ret = err;
 619	return ret;
 620}
 621EXPORT_SYMBOL(generic_buffers_fsync_noflush);
 622
 623/**
 624 * generic_buffers_fsync - generic buffer fsync implementation
 625 * for simple filesystems with no inode lock
 626 *
 627 * @file:	file to synchronize
 628 * @start:	start offset in bytes
 629 * @end:	end offset in bytes (inclusive)
 630 * @datasync:	only synchronize essential metadata if true
 631 *
 632 * This is a generic implementation of the fsync method for simple
 633 * filesystems which track all non-inode metadata in the buffers list
 634 * hanging off the address_space structure. This also makes sure that
 635 * a device cache flush operation is called at the end.
 636 */
 637int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
 638			  bool datasync)
 639{
 640	struct inode *inode = file->f_mapping->host;
 641	int ret;
 642
 643	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
 644	if (!ret)
 645		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
 646	return ret;
 647}
 648EXPORT_SYMBOL(generic_buffers_fsync);
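
/*
 * A minimal sketch of hooking generic_buffers_fsync() up as a
 * filesystem's ->fsync method, much as ext2 does.  The wrapper exists
 * because ->fsync passes datasync as an int rather than a bool; the
 * example_ names are hypothetical.
 */
static int example_file_fsync(struct file *file, loff_t start, loff_t end,
			      int datasync)
{
	return generic_buffers_fsync(file, start, end, datasync);
}

static const struct file_operations example_file_operations = {
	.llseek	= generic_file_llseek,
	.fsync	= example_file_fsync,
};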
 649
 650/*
 651 * Called when we've recently written block `bblock', and it is known that
 652 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 653 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 654 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 655 */
 656void write_boundary_block(struct block_device *bdev,
 657			sector_t bblock, unsigned blocksize)
 658{
 659	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 660	if (bh) {
 661		if (buffer_dirty(bh))
 662			write_dirty_buffer(bh, 0);
 663		put_bh(bh);
 664	}
 665}
 666
 667void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 668{
 669	struct address_space *mapping = inode->i_mapping;
 670	struct address_space *buffer_mapping = bh->b_folio->mapping;
 671
 672	mark_buffer_dirty(bh);
 673	if (!mapping->i_private_data) {
 674		mapping->i_private_data = buffer_mapping;
 675	} else {
 676		BUG_ON(mapping->i_private_data != buffer_mapping);
 677	}
 678	if (!bh->b_assoc_map) {
 679		spin_lock(&buffer_mapping->i_private_lock);
 680		list_move_tail(&bh->b_assoc_buffers,
 681				&mapping->i_private_list);
 682		bh->b_assoc_map = mapping;
 683		spin_unlock(&buffer_mapping->i_private_lock);
 684	}
 685}
 686EXPORT_SYMBOL(mark_buffer_dirty_inode);
 687
 688/**
 689 * block_dirty_folio - Mark a folio as dirty.
 690 * @mapping: The address space containing this folio.
 691 * @folio: The folio to mark dirty.
 692 *
 693 * Filesystems which use buffer_heads can use this function as their
 694 * ->dirty_folio implementation.  Some filesystems need to do a little
 695 * work before calling this function.  Filesystems which do not use
 696 * buffer_heads should call filemap_dirty_folio() instead.
 697 *
 698 * If the folio has buffers, the uptodate buffers are set dirty, to
 699 * preserve dirty-state coherency between the folio and the buffers.
 700 * Buffers added to a dirty folio are created dirty.
 701 *
 702 * The buffers are dirtied before the folio is dirtied.  There's a small
 703 * race window in which writeback may see the folio cleanness but not the
 704 * buffer dirtiness.  That's fine.  If this code were to set the folio
 705 * dirty before the buffers, writeback could clear the folio dirty flag,
 706 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 707 * folio on the dirty folio list.
 708 *
 709 * We use i_private_lock to lock against try_to_free_buffers() while
 710 * using the folio's buffer list.  This also prevents clean buffers
 711 * being added to the folio after it was set dirty.
 712 *
 713 * Context: May only be called from process context.  Does not sleep.
 714 * Caller must ensure that @folio cannot be truncated during this call,
 715 * typically by holding the folio lock or having a page in the folio
 716 * mapped and holding the page table lock.
 717 *
 718 * Return: True if the folio was dirtied; false if it was already dirtied.
 719 */
 720bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 721{
 722	struct buffer_head *head;
 723	bool newly_dirty;
 724
 725	spin_lock(&mapping->i_private_lock);
 726	head = folio_buffers(folio);
 727	if (head) {
 728		struct buffer_head *bh = head;
 729
 730		do {
 731			set_buffer_dirty(bh);
 732			bh = bh->b_this_page;
 733		} while (bh != head);
 734	}
 735	/*
 736	 * Lock out page's memcg migration to keep PageDirty
 737	 * synchronized with per-memcg dirty page counters.
 738	 */
 739	newly_dirty = !folio_test_set_dirty(folio);
 740	spin_unlock(&mapping->i_private_lock);
 741
 742	if (newly_dirty)
 743		__folio_mark_dirty(folio, mapping, 1);
 744
 745	if (newly_dirty)
 746		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 747
 748	return newly_dirty;
 749}
 750EXPORT_SYMBOL(block_dirty_folio);
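
/*
 * A minimal sketch of the address_space_operations wiring that
 * block_dirty_folio() is written for; buffer_head-based filesystems
 * typically pair it with block_invalidate_folio() (defined later in
 * this file).  The structure name is hypothetical.
 */
static const struct address_space_operations example_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
};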
 751
 752/*
 753 * Write out and wait upon a list of buffers.
 754 *
 755 * We have conflicting pressures: we want to make sure that all
 756 * initially dirty buffers get waited on, but that any subsequently
 757 * dirtied buffers don't.  After all, we don't want fsync to last
 758 * forever if somebody is actively writing to the file.
 759 *
 760 * Do this in two main stages: first we copy dirty buffers to a
 761 * temporary inode list, queueing the writes as we go.  Then we clean
 762 * up, waiting for those writes to complete.
 763 * 
 764 * During this second stage, any subsequent updates to the file may end
 765 * up refiling the buffer on the original inode's dirty list again, so
 766 * there is a chance we will end up with a buffer queued for write but
 767 * not yet completed on that list.  So, as a final cleanup we go through
 768 * the osync code to catch these locked, dirty buffers without requeuing
 769 * any newly dirty buffers for write.
 770 */
 771static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 772{
 773	struct buffer_head *bh;
 774	struct address_space *mapping;
 775	int err = 0, err2;
 776	struct blk_plug plug;
 777	LIST_HEAD(tmp);
 778
 779	blk_start_plug(&plug);
 780
 781	spin_lock(lock);
 782	while (!list_empty(list)) {
 783		bh = BH_ENTRY(list->next);
 784		mapping = bh->b_assoc_map;
 785		__remove_assoc_queue(bh);
 786		/* Avoid race with mark_buffer_dirty_inode() which does
 787		 * a lockless check and we rely on seeing the dirty bit */
 788		smp_mb();
 789		if (buffer_dirty(bh) || buffer_locked(bh)) {
 790			list_add(&bh->b_assoc_buffers, &tmp);
 791			bh->b_assoc_map = mapping;
 792			if (buffer_dirty(bh)) {
 793				get_bh(bh);
 794				spin_unlock(lock);
 795				/*
 796				 * Ensure any pending I/O completes so that
 797				 * write_dirty_buffer() actually writes the
 798				 * current contents - it is a noop if I/O is
 799				 * still in flight on potentially older
 800				 * contents.
 801				 */
 802				write_dirty_buffer(bh, REQ_SYNC);
 803
 804				/*
 805				 * Kick off IO for the previous mapping. Note
 806				 * that we will not run the very last mapping,
 807				 * wait_on_buffer() will do that for us
 808				 * through sync_buffer().
 809				 */
 810				brelse(bh);
 811				spin_lock(lock);
 812			}
 813		}
 814	}
 815
 816	spin_unlock(lock);
 817	blk_finish_plug(&plug);
 818	spin_lock(lock);
 819
 820	while (!list_empty(&tmp)) {
 821		bh = BH_ENTRY(tmp.prev);
 822		get_bh(bh);
 823		mapping = bh->b_assoc_map;
 824		__remove_assoc_queue(bh);
 825		/* Avoid race with mark_buffer_dirty_inode() which does
 826		 * a lockless check and we rely on seeing the dirty bit */
 827		smp_mb();
 828		if (buffer_dirty(bh)) {
 829			list_add(&bh->b_assoc_buffers,
 830				 &mapping->i_private_list);
 831			bh->b_assoc_map = mapping;
 832		}
 833		spin_unlock(lock);
 834		wait_on_buffer(bh);
 835		if (!buffer_uptodate(bh))
 836			err = -EIO;
 837		brelse(bh);
 838		spin_lock(lock);
 839	}
 840	
 841	spin_unlock(lock);
 842	err2 = osync_buffers_list(lock, list);
 843	if (err)
 844		return err;
 845	else
 846		return err2;
 847}
 848
 849/*
 850 * Invalidate any and all dirty buffers on a given inode.  We are
 851 * probably unmounting the fs, but that doesn't mean we have already
 852 * done a sync().  Just drop the buffers from the inode list.
 853 *
 854 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 855 * assumes that all the buffers are against the blockdev.
 856 */
 857void invalidate_inode_buffers(struct inode *inode)
 858{
 859	if (inode_has_buffers(inode)) {
 860		struct address_space *mapping = &inode->i_data;
 861		struct list_head *list = &mapping->i_private_list;
 862		struct address_space *buffer_mapping = mapping->i_private_data;
 863
 864		spin_lock(&buffer_mapping->i_private_lock);
 865		while (!list_empty(list))
 866			__remove_assoc_queue(BH_ENTRY(list->next));
 867		spin_unlock(&buffer_mapping->i_private_lock);
 868	}
 869}
 870EXPORT_SYMBOL(invalidate_inode_buffers);
 871
 872/*
 873 * Remove any clean buffers from the inode's buffer list.  This is called
 874 * when we're trying to free the inode itself.  Those buffers can pin it.
 875 *
 876 * Returns true if all buffers were removed.
 877 */
 878int remove_inode_buffers(struct inode *inode)
 879{
 880	int ret = 1;
 881
 882	if (inode_has_buffers(inode)) {
 883		struct address_space *mapping = &inode->i_data;
 884		struct list_head *list = &mapping->i_private_list;
 885		struct address_space *buffer_mapping = mapping->i_private_data;
 886
 887		spin_lock(&buffer_mapping->i_private_lock);
 888		while (!list_empty(list)) {
 889			struct buffer_head *bh = BH_ENTRY(list->next);
 890			if (buffer_dirty(bh)) {
 891				ret = 0;
 892				break;
 893			}
 894			__remove_assoc_queue(bh);
 895		}
 896		spin_unlock(&buffer_mapping->i_private_lock);
 897	}
 898	return ret;
 899}
 900
 901/*
 902 * Create the appropriate buffers when given a folio for the data area and
 903 * the size of each buffer.  Use the bh->b_this_page linked list to
 904 * follow the buffers created.  Return NULL if unable to create more
 905 * buffers.
 906 *
 907 * The gfp flags are used to differentiate async IO (paging, swapping),
 908 * which may not fail, from ordinary buffer allocations.
 909 */
 910struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
 911					gfp_t gfp)
 912{
 913	struct buffer_head *bh, *head;
 914	long offset;
 915	struct mem_cgroup *memcg, *old_memcg;
 916
 917	/* The folio lock pins the memcg */
 918	memcg = folio_memcg(folio);
 919	old_memcg = set_active_memcg(memcg);
 920
 921	head = NULL;
 922	offset = folio_size(folio);
 923	while ((offset -= size) >= 0) {
 924		bh = alloc_buffer_head(gfp);
 925		if (!bh)
 926			goto no_grow;
 927
 928		bh->b_this_page = head;
 929		bh->b_blocknr = -1;
 930		head = bh;
 931
 932		bh->b_size = size;
 933
 934		/* Link the buffer to its folio */
 935		folio_set_bh(bh, folio, offset);
 936	}
 937out:
 938	set_active_memcg(old_memcg);
 939	return head;
 940/*
 941 * In case anything failed, we just free everything we got.
 942 */
 943no_grow:
 944	if (head) {
 945		do {
 946			bh = head;
 947			head = head->b_this_page;
 948			free_buffer_head(bh);
 949		} while (head);
 950	}
 951
 952	goto out;
 953}
 954EXPORT_SYMBOL_GPL(folio_alloc_buffers);
 955
 956struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
 957{
 958	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
 959
 960	return folio_alloc_buffers(page_folio(page), size, gfp);
 961}
 962EXPORT_SYMBOL_GPL(alloc_page_buffers);
 963
 964static inline void link_dev_buffers(struct folio *folio,
 965		struct buffer_head *head)
 966{
 967	struct buffer_head *bh, *tail;
 968
 969	bh = head;
 970	do {
 971		tail = bh;
 972		bh = bh->b_this_page;
 973	} while (bh);
 974	tail->b_this_page = head;
 975	folio_attach_private(folio, head);
 976}
 977
 978static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
 979{
 980	sector_t retval = ~((sector_t)0);
 981	loff_t sz = bdev_nr_bytes(bdev);
 982
 983	if (sz) {
 984		unsigned int sizebits = blksize_bits(size);
 985		retval = (sz >> sizebits);
 986	}
 987	return retval;
 988}
 989
 990/*
 991 * Initialise the state of a blockdev folio's buffers.
 992 */ 
 993static sector_t folio_init_buffers(struct folio *folio,
 994		struct block_device *bdev, unsigned size)
 995{
 996	struct buffer_head *head = folio_buffers(folio);
 997	struct buffer_head *bh = head;
 998	bool uptodate = folio_test_uptodate(folio);
 999	sector_t block = div_u64(folio_pos(folio), size);
1000	sector_t end_block = blkdev_max_block(bdev, size);
1001
1002	do {
1003		if (!buffer_mapped(bh)) {
1004			bh->b_end_io = NULL;
1005			bh->b_private = NULL;
1006			bh->b_bdev = bdev;
1007			bh->b_blocknr = block;
1008			if (uptodate)
1009				set_buffer_uptodate(bh);
1010			if (block < end_block)
1011				set_buffer_mapped(bh);
1012		}
1013		block++;
1014		bh = bh->b_this_page;
1015	} while (bh != head);
1016
1017	/*
1018	 * Caller needs to validate requested block against end of device.
1019	 */
1020	return end_block;
1021}
1022
1023/*
1024 * Create the page-cache folio that contains the requested block.
1025 *
1026 * This is used purely for blockdev mappings.
1027 *
1028 * Returns false if we have a failure which cannot be cured by retrying
1029 * without sleeping.  Returns true if we succeeded, or the caller should retry.
1030 */
1031static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1032		pgoff_t index, unsigned size, gfp_t gfp)
1033{
1034	struct address_space *mapping = bdev->bd_mapping;
1035	struct folio *folio;
1036	struct buffer_head *bh;
1037	sector_t end_block = 0;
1038
1039	folio = __filemap_get_folio(mapping, index,
1040			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1041	if (IS_ERR(folio))
1042		return false;
1043
1044	bh = folio_buffers(folio);
1045	if (bh) {
1046		if (bh->b_size == size) {
1047			end_block = folio_init_buffers(folio, bdev, size);
1048			goto unlock;
1049		}
1050
1051		/*
1052		 * Retrying may succeed; for example the folio may finish
1053		 * writeback, or buffers may be cleaned.  This should not
1054		 * happen very often; maybe we have old buffers attached to
1055		 * this blockdev's page cache and we're trying to change
1056		 * the block size?
1057		 */
1058		if (!try_to_free_buffers(folio)) {
1059			end_block = ~0ULL;
1060			goto unlock;
1061		}
1062	}
1063
1064	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
1065	if (!bh)
1066		goto unlock;
1067
1068	/*
1069	 * Link the folio to the buffers and initialise them.  Take the
1070	 * lock to be atomic wrt __find_get_block(), which does not
1071	 * run under the folio lock.
1072	 */
1073	spin_lock(&mapping->i_private_lock);
1074	link_dev_buffers(folio, bh);
1075	end_block = folio_init_buffers(folio, bdev, size);
1076	spin_unlock(&mapping->i_private_lock);
1077unlock:
1078	folio_unlock(folio);
1079	folio_put(folio);
1080	return block < end_block;
1081}
1082
1083/*
1084 * Create buffers for the specified block device block's folio.  If
1085 * that folio was dirty, the buffers are set dirty also.  Returns false
1086 * if we've hit a permanent error.
1087 */
1088static bool grow_buffers(struct block_device *bdev, sector_t block,
1089		unsigned size, gfp_t gfp)
1090{
1091	loff_t pos;
1092
1093	/*
1094	 * Check for a block which lies outside our maximum possible
1095	 * pagecache index.
1096	 */
1097	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
1098		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
1099			__func__, (unsigned long long)block,
1100			bdev);
1101		return false;
1102	}
1103
1104	/* Create a folio with the proper size buffers */
1105	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
1106}
1107
1108static struct buffer_head *
1109__getblk_slow(struct block_device *bdev, sector_t block,
1110	     unsigned size, gfp_t gfp)
1111{
1112	/* Size must be a multiple of the hard sector size */
1113	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1114			(size < 512 || size > PAGE_SIZE))) {
1115		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1116					size);
1117		printk(KERN_ERR "logical block size: %d\n",
1118					bdev_logical_block_size(bdev));
1119
1120		dump_stack();
1121		return NULL;
1122	}
1123
1124	for (;;) {
1125		struct buffer_head *bh;
1126
1127		bh = __find_get_block(bdev, block, size);
1128		if (bh)
1129			return bh;
1130
1131		if (!grow_buffers(bdev, block, size, gfp))
1132			return NULL;
1133	}
1134}
1135
1136/*
1137 * The relationship between dirty buffers and dirty pages:
1138 *
1139 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1140 * the page is tagged dirty in the page cache.
1141 *
1142 * At all times, the dirtiness of the buffers represents the dirtiness of
1143 * subsections of the page.  If the page has buffers, the page dirty bit is
1144 * merely a hint about the true dirty state.
1145 *
1146 * When a page is set dirty in its entirety, all its buffers are marked dirty
1147 * (if the page has buffers).
1148 *
1149 * When a buffer is marked dirty, its page is dirtied, but the page's other
1150 * buffers are not.
1151 *
1152 * Also.  When blockdev buffers are explicitly read with bread(), they
1153 * individually become uptodate.  But their backing page remains not
1154 * uptodate - even if all of its buffers are uptodate.  A subsequent
1155 * block_read_full_folio() against that folio will discover all the uptodate
1156 * buffers, will set the folio uptodate and will perform no I/O.
1157 */
1158
1159/**
1160 * mark_buffer_dirty - mark a buffer_head as needing writeout
1161 * @bh: the buffer_head to mark dirty
1162 *
1163 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1164 * its backing page dirty, then tag the page as dirty in the page cache
1165 * and then attach the address_space's inode to its superblock's dirty
1166 * inode list.
1167 *
1168 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
1169 * i_pages lock and mapping->host->i_lock.
1170 */
1171void mark_buffer_dirty(struct buffer_head *bh)
1172{
1173	WARN_ON_ONCE(!buffer_uptodate(bh));
1174
1175	trace_block_dirty_buffer(bh);
1176
1177	/*
1178	 * Very *carefully* optimize the it-is-already-dirty case.
1179	 *
1180	 * Don't let the final "is it dirty" escape to before we
1181	 * perhaps modified the buffer.
1182	 */
1183	if (buffer_dirty(bh)) {
1184		smp_mb();
1185		if (buffer_dirty(bh))
1186			return;
1187	}
1188
1189	if (!test_set_buffer_dirty(bh)) {
1190		struct folio *folio = bh->b_folio;
1191		struct address_space *mapping = NULL;
1192
1193		if (!folio_test_set_dirty(folio)) {
1194			mapping = folio->mapping;
1195			if (mapping)
1196				__folio_mark_dirty(folio, mapping, 0);
1197		}
1198		if (mapping)
1199			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1200	}
1201}
1202EXPORT_SYMBOL(mark_buffer_dirty);
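
/*
 * A minimal sketch of the common metadata-update pattern built on
 * mark_buffer_dirty(): read a block with sb_bread(), modify it in
 * memory, mark it dirty, and let writeback push it to disk later.  The
 * function name and byte layout are hypothetical.
 */
static int example_set_byte(struct super_block *sb, sector_t block,
			    unsigned int offset, u8 val)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;
	((u8 *)bh->b_data)[offset] = val;	/* assumes offset < blocksize */
	mark_buffer_dirty(bh);		/* dirties buffer, folio and inode */
	brelse(bh);
	return 0;
}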
1203
1204void mark_buffer_write_io_error(struct buffer_head *bh)
1205{
1206	set_buffer_write_io_error(bh);
1207	/* FIXME: do we need to set this in both places? */
1208	if (bh->b_folio && bh->b_folio->mapping)
1209		mapping_set_error(bh->b_folio->mapping, -EIO);
1210	if (bh->b_assoc_map) {
1211		mapping_set_error(bh->b_assoc_map, -EIO);
1212		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
1213	}
1214}
1215EXPORT_SYMBOL(mark_buffer_write_io_error);
1216
1217/**
1218 * __brelse - Release a buffer.
1219 * @bh: The buffer to release.
1220 *
1221 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
1222 */
1223void __brelse(struct buffer_head *bh)
1224{
1225	if (atomic_read(&bh->b_count)) {
1226		put_bh(bh);
1227		return;
1228	}
1229	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1230}
1231EXPORT_SYMBOL(__brelse);
1232
1233/**
1234 * __bforget - Discard any dirty data in a buffer.
1235 * @bh: The buffer to forget.
1236 *
1237 * This variant of bforget() can be called if @bh is guaranteed to not
1238 * be NULL.
1239 */
1240void __bforget(struct buffer_head *bh)
1241{
1242	clear_buffer_dirty(bh);
1243	if (bh->b_assoc_map) {
1244		struct address_space *buffer_mapping = bh->b_folio->mapping;
1245
1246		spin_lock(&buffer_mapping->i_private_lock);
1247		list_del_init(&bh->b_assoc_buffers);
1248		bh->b_assoc_map = NULL;
1249		spin_unlock(&buffer_mapping->i_private_lock);
1250	}
1251	__brelse(bh);
1252}
1253EXPORT_SYMBOL(__bforget);
1254
1255static struct buffer_head *__bread_slow(struct buffer_head *bh)
1256{
1257	lock_buffer(bh);
1258	if (buffer_uptodate(bh)) {
1259		unlock_buffer(bh);
1260		return bh;
1261	} else {
1262		get_bh(bh);
1263		bh->b_end_io = end_buffer_read_sync;
1264		submit_bh(REQ_OP_READ, bh);
1265		wait_on_buffer(bh);
1266		if (buffer_uptodate(bh))
1267			return bh;
1268	}
1269	brelse(bh);
1270	return NULL;
1271}
1272
1273/*
1274 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1275 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1276 * refcount elevated by one when they're in an LRU.  A buffer can only appear
1277 * once in a particular CPU's LRU.  A single buffer can be present in multiple
1278 * CPU's LRUs at the same time.
1279 *
1280 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1281 * sb_find_get_block().
1282 *
1283 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1284 * a local interrupt disable for that.
1285 */
1286
1287#define BH_LRU_SIZE	16
1288
1289struct bh_lru {
1290	struct buffer_head *bhs[BH_LRU_SIZE];
1291};
1292
1293static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1294
1295#ifdef CONFIG_SMP
1296#define bh_lru_lock()	local_irq_disable()
1297#define bh_lru_unlock()	local_irq_enable()
1298#else
1299#define bh_lru_lock()	preempt_disable()
1300#define bh_lru_unlock()	preempt_enable()
1301#endif
1302
1303static inline void check_irqs_on(void)
1304{
1305#ifdef irqs_disabled
1306	BUG_ON(irqs_disabled());
1307#endif
1308}
1309
1310/*
1311 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1312 * inserted at the front, and the buffer_head at the back, if any, is evicted.
1313 * Or, if already in the LRU, it is moved to the front.
1314 */
1315static void bh_lru_install(struct buffer_head *bh)
1316{
1317	struct buffer_head *evictee = bh;
1318	struct bh_lru *b;
1319	int i;
1320
1321	check_irqs_on();
1322	bh_lru_lock();
1323
1324	/*
1325	 * The refcount of a buffer_head in the bh_lru prevents dropping the
1326	 * attached page (i.e., via try_to_free_buffers), which could cause
1327	 * page migration to fail.
1328	 * Skip putting an upcoming bh into the bh_lru until migration is done.
1329	 */
1330	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1331		bh_lru_unlock();
1332		return;
1333	}
1334
1335	b = this_cpu_ptr(&bh_lrus);
1336	for (i = 0; i < BH_LRU_SIZE; i++) {
1337		swap(evictee, b->bhs[i]);
1338		if (evictee == bh) {
1339			bh_lru_unlock();
1340			return;
1341		}
1342	}
1343
1344	get_bh(bh);
1345	bh_lru_unlock();
1346	brelse(evictee);
1347}
1348
1349/*
1350 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1351 */
1352static struct buffer_head *
1353lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1354{
1355	struct buffer_head *ret = NULL;
1356	unsigned int i;
1357
1358	check_irqs_on();
1359	bh_lru_lock();
1360	if (cpu_is_isolated(smp_processor_id())) {
1361		bh_lru_unlock();
1362		return NULL;
1363	}
1364	for (i = 0; i < BH_LRU_SIZE; i++) {
1365		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1366
1367		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1368		    bh->b_size == size) {
1369			if (i) {
1370				while (i) {
1371					__this_cpu_write(bh_lrus.bhs[i],
1372						__this_cpu_read(bh_lrus.bhs[i - 1]));
1373					i--;
1374				}
1375				__this_cpu_write(bh_lrus.bhs[0], bh);
1376			}
1377			get_bh(bh);
1378			ret = bh;
1379			break;
1380		}
1381	}
1382	bh_lru_unlock();
1383	return ret;
1384}
1385
1386/*
1387 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1388 * it in the LRU and mark it as accessed.  If it is not present then return
1389 * NULL.
1390 */
1391struct buffer_head *
1392__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1393{
1394	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1395
1396	if (bh == NULL) {
1397		/* __find_get_block_slow will mark the page accessed */
1398		bh = __find_get_block_slow(bdev, block);
1399		if (bh)
1400			bh_lru_install(bh);
1401	} else
1402		touch_buffer(bh);
1403
1404	return bh;
1405}
1406EXPORT_SYMBOL(__find_get_block);
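
/*
 * A minimal sketch of a cache-only lookup built on __find_get_block()
 * (via its sb_find_get_block() wrapper from buffer_head.h): peek at
 * the buffer cache without allocating anything or starting I/O.  The
 * function name is hypothetical.
 */
static bool example_block_is_cached(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_find_get_block(sb, block);

	if (!bh)
		return false;
	brelse(bh);
	return true;
}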
1407
1408/**
1409 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
1410 * @bdev: The block device.
1411 * @block: The block number.
1412 * @size: The size of buffer_heads for this @bdev.
1413 * @gfp: The memory allocation flags to use.
1414 *
1415 * The returned buffer head has its reference count incremented, but is
1416 * not locked.  The caller should call brelse() when it has finished
1417 * with the buffer.  The buffer may not be uptodate.  If needed, the
1418 * caller can bring it uptodate either by reading it or overwriting it.
1419 *
1420 * Return: The buffer head, or NULL if memory could not be allocated.
1421 */
1422struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
1423		unsigned size, gfp_t gfp)
1424{
1425	struct buffer_head *bh = __find_get_block(bdev, block, size);
1426
1427	might_alloc(gfp);
1428	if (bh)
1429		return bh;
1430
1431	return __getblk_slow(bdev, block, size, gfp);
1432}
1433EXPORT_SYMBOL(bdev_getblk);
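
/*
 * A minimal sketch of the "may not be uptodate" contract noted above:
 * after bdev_getblk(), read the buffer (or overwrite it completely)
 * before trusting its contents.  bh_read() is the helper from
 * buffer_head.h; the function name is hypothetical.
 */
static struct buffer_head *example_getblk_read(struct block_device *bdev,
					       sector_t block, unsigned size)
{
	struct buffer_head *bh = bdev_getblk(bdev, block, size,
					     GFP_NOFS | __GFP_MOVABLE);

	if (!bh)
		return NULL;
	if (bh_read(bh, 0) < 0) {	/* returns early if already uptodate */
		brelse(bh);
		return NULL;
	}
	return bh;
}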
1434
1435/*
1436 * Do async read-ahead on a buffer.
1437 */
1438void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1439{
1440	struct buffer_head *bh = bdev_getblk(bdev, block, size,
1441			GFP_NOWAIT | __GFP_MOVABLE);
1442
1443	if (likely(bh)) {
1444		bh_readahead(bh, REQ_RAHEAD);
1445		brelse(bh);
1446	}
1447}
1448EXPORT_SYMBOL(__breadahead);
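
/*
 * A minimal sketch of the read-ahead pattern this enables: start an
 * asynchronous read of the block likely to be needed next (via the
 * sb_breadahead() wrapper from buffer_head.h), then read the current
 * block synchronously.  The function name is hypothetical.
 */
static struct buffer_head *example_read_with_lookahead(struct super_block *sb,
						       sector_t block)
{
	sb_breadahead(sb, block + 1);	/* async; failure is harmless */
	return sb_bread(sb, block);	/* may sleep waiting for I/O */
}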
1449
1450/**
1451 * __bread_gfp() - Read a block.
1452 * @bdev: The block device to read from.
1453 * @block: Block number in units of block size.
1454 * @size: The block size of this device in bytes.
1455 * @gfp: Not page allocation flags; see below.
1456 *
1457 * You are not expected to call this function.  You should use one of
1458 * sb_bread(), sb_bread_unmovable() or __bread().
1459 *
1460 * Read a specified block, and return the buffer head that refers to it.
1461 * If @gfp is 0, the memory will be allocated using the block device's
1462 * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
1463 * allocated from a movable area.  Do not pass in a complete set of
1464 * GFP flags.
1465 *
1466 * The returned buffer head has its refcount increased.  The caller should
1467 * call brelse() when it has finished with the buffer.
1468 *
1469 * Context: May sleep waiting for I/O.
1470 * Return: NULL if the block was unreadable.
1471 */
1472struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
1473		unsigned size, gfp_t gfp)
1474{
1475	struct buffer_head *bh;
1476
1477	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
1478
1479	/*
1480	 * Prefer looping in the allocator rather than here, at least that
1481	 * code knows what it's doing.
1482	 */
1483	gfp |= __GFP_NOFAIL;
1484
1485	bh = bdev_getblk(bdev, block, size, gfp);
1486
1487	if (likely(bh) && !buffer_uptodate(bh))
1488		bh = __bread_slow(bh);
1489	return bh;
1490}
1491EXPORT_SYMBOL(__bread_gfp);
1492
1493static void __invalidate_bh_lrus(struct bh_lru *b)
1494{
1495	int i;
1496
1497	for (i = 0; i < BH_LRU_SIZE; i++) {
1498		brelse(b->bhs[i]);
1499		b->bhs[i] = NULL;
1500	}
1501}
1502/*
1503 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1504 * This doesn't race because it runs in each cpu either in irq
1505 * or with preempt disabled.
1506 */
1507static void invalidate_bh_lru(void *arg)
1508{
1509	struct bh_lru *b = &get_cpu_var(bh_lrus);
1510
1511	__invalidate_bh_lrus(b);
1512	put_cpu_var(bh_lrus);
1513}
1514
1515bool has_bh_in_lru(int cpu, void *dummy)
1516{
1517	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1518	int i;
1519	
1520	for (i = 0; i < BH_LRU_SIZE; i++) {
1521		if (b->bhs[i])
1522			return true;
1523	}
1524
1525	return false;
1526}
1527
1528void invalidate_bh_lrus(void)
1529{
1530	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1531}
1532EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1533
1534/*
1535 * It's called from workqueue context so we need a bh_lru_lock to close
1536 * the race with preemption/irq.
1537 */
1538void invalidate_bh_lrus_cpu(void)
1539{
1540	struct bh_lru *b;
1541
1542	bh_lru_lock();
1543	b = this_cpu_ptr(&bh_lrus);
1544	__invalidate_bh_lrus(b);
1545	bh_lru_unlock();
1546}
1547
1548void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1549		  unsigned long offset)
1550{
1551	bh->b_folio = folio;
1552	BUG_ON(offset >= folio_size(folio));
1553	if (folio_test_highmem(folio))
1554		/*
1555		 * This catches illegal uses and preserves the offset:
1556		 */
1557		bh->b_data = (char *)(0 + offset);
1558	else
1559		bh->b_data = folio_address(folio) + offset;
1560}
1561EXPORT_SYMBOL(folio_set_bh);
1562
1563/*
1564 * Called when a buffer on a page is truncated completely.
1565 */
1566
1567/* Bits that are cleared during an invalidate */
1568#define BUFFER_FLAGS_DISCARD \
1569	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1570	 1 << BH_Delay | 1 << BH_Unwritten)
1571
1572static void discard_buffer(struct buffer_head * bh)
1573{
1574	unsigned long b_state;
1575
1576	lock_buffer(bh);
1577	clear_buffer_dirty(bh);
1578	bh->b_bdev = NULL;
1579	b_state = READ_ONCE(bh->b_state);
1580	do {
1581	} while (!try_cmpxchg(&bh->b_state, &b_state,
1582			      b_state & ~BUFFER_FLAGS_DISCARD));
1583	unlock_buffer(bh);
1584}
1585
1586/**
1587 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1588 * @folio: The folio which is affected.
1589 * @offset: start of the range to invalidate
1590 * @length: length of the range to invalidate
1591 *
1592 * block_invalidate_folio() is called when all or part of the folio has been
1593 * invalidated by a truncate operation.
1594 *
1595 * block_invalidate_folio() does not have to release all buffers, but it must
1596 * ensure that no dirty buffer is left outside @offset and that no I/O
1597 * is underway against any of the blocks which are outside the truncation
1598 * point.  Because the caller is about to free (and possibly reuse) those
1599 * blocks on-disk.
1600 */
1601void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1602{
1603	struct buffer_head *head, *bh, *next;
1604	size_t curr_off = 0;
1605	size_t stop = length + offset;
1606
1607	BUG_ON(!folio_test_locked(folio));
1608
1609	/*
1610	 * Check for overflow
1611	 */
1612	BUG_ON(stop > folio_size(folio) || stop < length);
1613
1614	head = folio_buffers(folio);
1615	if (!head)
1616		return;
1617
1618	bh = head;
1619	do {
1620		size_t next_off = curr_off + bh->b_size;
1621		next = bh->b_this_page;
1622
1623		/*
1624		 * Are we still fully in range ?
1625		 */
1626		if (next_off > stop)
1627			goto out;
1628
1629		/*
1630		 * is this block fully invalidated?
1631		 */
1632		if (offset <= curr_off)
1633			discard_buffer(bh);
1634		curr_off = next_off;
1635		bh = next;
1636	} while (bh != head);
1637
1638	/*
1639	 * We release buffers only if the entire folio is being invalidated.
1640	 * The get_block cached value has been unconditionally invalidated,
1641	 * so real IO is not possible anymore.
1642	 */
1643	if (length == folio_size(folio))
1644		filemap_release_folio(folio, 0);
1645out:
1646	folio_clear_mappedtodisk(folio);
1647	return;
1648}
1649EXPORT_SYMBOL(block_invalidate_folio);
1650
1651/*
1652 * We attach and possibly dirty the buffers atomically wrt
1653 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
1654 * is already excluded via the folio lock.
1655 */
1656struct buffer_head *create_empty_buffers(struct folio *folio,
1657		unsigned long blocksize, unsigned long b_state)
1658{
1659	struct buffer_head *bh, *head, *tail;
1660	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
1661
1662	head = folio_alloc_buffers(folio, blocksize, gfp);
1663	bh = head;
1664	do {
1665		bh->b_state |= b_state;
1666		tail = bh;
1667		bh = bh->b_this_page;
1668	} while (bh);
1669	tail->b_this_page = head;
1670
1671	spin_lock(&folio->mapping->i_private_lock);
1672	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1673		bh = head;
1674		do {
1675			if (folio_test_dirty(folio))
1676				set_buffer_dirty(bh);
1677			if (folio_test_uptodate(folio))
1678				set_buffer_uptodate(bh);
1679			bh = bh->b_this_page;
1680		} while (bh != head);
1681	}
1682	folio_attach_private(folio, head);
1683	spin_unlock(&folio->mapping->i_private_lock);
1684
1685	return head;
1686}
1687EXPORT_SYMBOL(create_empty_buffers);
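
/*
 * A minimal sketch of how a caller obtains a folio's buffer ring,
 * creating empty buffers on first touch; folio_create_buffers() later
 * in this file performs the same dance.  The function name is
 * hypothetical.
 */
static struct buffer_head *example_folio_buffers(struct folio *folio,
						 struct inode *inode)
{
	struct buffer_head *head = folio_buffers(folio);

	if (!head)
		head = create_empty_buffers(folio,
					    1 << inode->i_blkbits, 0);
	return head;
}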
1688
1689/**
1690 * clean_bdev_aliases: clean a range of buffers in block device
1691 * @bdev: Block device to clean buffers in
1692 * @block: Start of a range of blocks to clean
1693 * @len: Number of blocks to clean
1694 *
1695 * We are taking a range of blocks for data and we don't want writeback of any
1696 * buffer-cache aliases starting from return from this function and until the
1697 * moment when something will explicitly mark the buffer dirty (hopefully that
1698 * will not happen until we free that block ;-) We don't even need to mark
1699 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1700 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1701 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1702 * would confuse anyone who might pick it with bread() afterwards...
1703 *
1704 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
1705 * writeout I/O going on against recently-freed buffers.  We don't wait on that
1706 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1707 * need to.  That happens here.
1708 */
1709void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1710{
1711	struct address_space *bd_mapping = bdev->bd_mapping;
1712	const int blkbits = bd_mapping->host->i_blkbits;
1713	struct folio_batch fbatch;
1714	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1715	pgoff_t end;
1716	int i, count;
1717	struct buffer_head *bh;
1718	struct buffer_head *head;
1719
1720	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1721	folio_batch_init(&fbatch);
1722	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1723		count = folio_batch_count(&fbatch);
1724		for (i = 0; i < count; i++) {
1725			struct folio *folio = fbatch.folios[i];
1726
1727			if (!folio_buffers(folio))
1728				continue;
1729			/*
1730			 * We use the folio lock instead of bd_mapping->i_private_lock
1731			 * to pin buffers here since we can afford to sleep and
1732			 * it scales better than a global spinlock.
1733			 */
1734			folio_lock(folio);
1735			/* Recheck when the folio is locked which pins bhs */
1736			head = folio_buffers(folio);
1737			if (!head)
1738				goto unlock_page;
1739			bh = head;
1740			do {
1741				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1742					goto next;
1743				if (bh->b_blocknr >= block + len)
1744					break;
1745				clear_buffer_dirty(bh);
1746				wait_on_buffer(bh);
1747				clear_buffer_req(bh);
1748next:
1749				bh = bh->b_this_page;
1750			} while (bh != head);
1751unlock_page:
1752			folio_unlock(folio);
1753		}
1754		folio_batch_release(&fbatch);
1755		cond_resched();
1756		/* End of range already reached? */
1757		if (index > end || !index)
1758			break;
1759	}
1760}
1761EXPORT_SYMBOL(clean_bdev_aliases);
1762
1763static struct buffer_head *folio_create_buffers(struct folio *folio,
1764						struct inode *inode,
1765						unsigned int b_state)
1766{
1767	struct buffer_head *bh;
1768
1769	BUG_ON(!folio_test_locked(folio));
1770
1771	bh = folio_buffers(folio);
1772	if (!bh)
1773		bh = create_empty_buffers(folio,
1774				1 << READ_ONCE(inode->i_blkbits), b_state);
1775	return bh;
1776}
1777
1778/*
1779 * NOTE! All mapped/uptodate combinations are valid:
1780 *
1781 *	Mapped	Uptodate	Meaning
1782 *
1783 *	No	No		"unknown" - must do get_block()
1784 *	No	Yes		"hole" - zero-filled
1785 *	Yes	No		"allocated" - allocated on disk, not read in
1786 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1787 *
1788 * "Dirty" is valid only with the last case (mapped+uptodate).
1789 */
1790
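/*
 * A minimal illustration of the state table above; example_bh_state() is
 * hypothetical and exists only to spell the four combinations out in code.
 */
static const char *example_bh_state(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}
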
1791/*
1792 * While block_write_full_folio is writing back the dirty buffers under
1793 * the page lock, whoever dirtied the buffers may decide to clean them
1794 * again at any time.  We handle that by only looking at the buffer
1795 * state inside lock_buffer().
1796 *
1797 * If block_write_full_folio() is called for regular writeback
1798 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1799 * locked buffer.  This can only happen if someone has written the buffer
1800 * directly, with submit_bh().  At the address_space level PageWriteback
1801 * prevents this contention from occurring.
1802 *
1803 * If block_write_full_folio() is called with wbc->sync_mode ==
1804 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1805 * causes the writes to be flagged as synchronous writes.
1806 */
1807int __block_write_full_folio(struct inode *inode, struct folio *folio,
1808			get_block_t *get_block, struct writeback_control *wbc)
1809{
1810	int err;
1811	sector_t block;
1812	sector_t last_block;
1813	struct buffer_head *bh, *head;
1814	size_t blocksize;
1815	int nr_underway = 0;
1816	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1817
1818	head = folio_create_buffers(folio, inode,
1819				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1820
1821	/*
1822	 * Be very careful.  We have no exclusion from block_dirty_folio
1823	 * here, and the (potentially unmapped) buffers may become dirty at
1824	 * any time.  If a buffer becomes dirty here after we've inspected it
1825	 * then we just miss that fact, and the folio stays dirty.
1826	 *
1827	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1828	 * handle that here by just cleaning them.
1829	 */
1830
1831	bh = head;
1832	blocksize = bh->b_size;
1833
1834	block = div_u64(folio_pos(folio), blocksize);
1835	last_block = div_u64(i_size_read(inode) - 1, blocksize);
1836
1837	/*
1838	 * Get all the dirty buffers mapped to disk addresses and
1839	 * handle any aliases from the underlying blockdev's mapping.
1840	 */
1841	do {
1842		if (block > last_block) {
1843			/*
1844			 * mapped buffers outside i_size will occur, because
1845			 * this folio can be outside i_size when there is a
1846			 * truncate in progress.
1847			 */
1848			/*
1849			 * The buffer was zeroed by block_write_full_folio()
1850			 */
1851			clear_buffer_dirty(bh);
1852			set_buffer_uptodate(bh);
1853		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1854			   buffer_dirty(bh)) {
1855			WARN_ON(bh->b_size != blocksize);
1856			err = get_block(inode, block, bh, 1);
1857			if (err)
1858				goto recover;
1859			clear_buffer_delay(bh);
1860			if (buffer_new(bh)) {
1861				/* blockdev mappings never come here */
1862				clear_buffer_new(bh);
1863				clean_bdev_bh_alias(bh);
1864			}
1865		}
1866		bh = bh->b_this_page;
1867		block++;
1868	} while (bh != head);
1869
1870	do {
1871		if (!buffer_mapped(bh))
1872			continue;
1873		/*
1874		 * If it's a fully non-blocking write attempt and we cannot
1875		 * lock the buffer then redirty the folio.  Note that this can
1876		 * potentially cause a busy-wait loop from writeback threads
1877		 * and kswapd activity, but those code paths have their own
1878		 * higher-level throttling.
1879		 */
1880		if (wbc->sync_mode != WB_SYNC_NONE) {
1881			lock_buffer(bh);
1882		} else if (!trylock_buffer(bh)) {
1883			folio_redirty_for_writepage(wbc, folio);
1884			continue;
1885		}
1886		if (test_clear_buffer_dirty(bh)) {
1887			mark_buffer_async_write_endio(bh,
1888				end_buffer_async_write);
1889		} else {
1890			unlock_buffer(bh);
1891		}
1892	} while ((bh = bh->b_this_page) != head);
1893
1894	/*
1895	 * The folio and its buffers are protected by the writeback flag,
1896	 * so we can drop the bh refcounts early.
1897	 */
1898	BUG_ON(folio_test_writeback(folio));
1899	folio_start_writeback(folio);
1900
1901	do {
1902		struct buffer_head *next = bh->b_this_page;
1903		if (buffer_async_write(bh)) {
1904			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1905				      inode->i_write_hint, wbc);
1906			nr_underway++;
1907		}
1908		bh = next;
1909	} while (bh != head);
1910	folio_unlock(folio);
1911
1912	err = 0;
1913done:
1914	if (nr_underway == 0) {
1915		/*
1916		 * The folio was marked dirty, but the buffers were
1917		 * clean.  Someone wrote them back by hand with
1918		 * write_dirty_buffer/submit_bh.  A rare case.
1919		 */
1920		folio_end_writeback(folio);
1921
1922		/*
1923		 * The folio and buffer_heads can be released at any time from
1924		 * here on.
1925		 */
1926	}
1927	return err;
1928
1929recover:
1930	/*
1931	 * ENOSPC, or some other error.  We may already have added some
1932	 * blocks to the file, so we need to write these out to avoid
1933	 * exposing stale data.
1934	 * The folio is currently locked and not marked for writeback
1935	 */
1936	bh = head;
1937	/* Recovery: lock and submit the mapped buffers */
1938	do {
1939		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1940		    !buffer_delay(bh)) {
1941			lock_buffer(bh);
1942			mark_buffer_async_write_endio(bh,
1943				end_buffer_async_write);
1944		} else {
1945			/*
1946			 * The buffer may have been set dirty during
1947			 * attachment to a dirty folio.
1948			 */
1949			clear_buffer_dirty(bh);
1950		}
1951	} while ((bh = bh->b_this_page) != head);
1952	BUG_ON(folio_test_writeback(folio));
1953	mapping_set_error(folio->mapping, err);
1954	folio_start_writeback(folio);
1955	do {
1956		struct buffer_head *next = bh->b_this_page;
1957		if (buffer_async_write(bh)) {
1958			clear_buffer_dirty(bh);
1959			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1960				      inode->i_write_hint, wbc);
1961			nr_underway++;
1962		}
1963		bh = next;
1964	} while (bh != head);
1965	folio_unlock(folio);
1966	goto done;
1967}
1968EXPORT_SYMBOL(__block_write_full_folio);
1969
1970/*
1971 * If a folio has any new buffers, zero them out here, and mark them uptodate
1972 * and dirty so they'll be written out (in order to prevent uninitialised
1973 * block data from leaking). And clear the new bit.
1974 */
1975void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1976{
1977	size_t block_start, block_end;
1978	struct buffer_head *head, *bh;
1979
1980	BUG_ON(!folio_test_locked(folio));
1981	head = folio_buffers(folio);
1982	if (!head)
1983		return;
1984
1985	bh = head;
1986	block_start = 0;
1987	do {
1988		block_end = block_start + bh->b_size;
1989
1990		if (buffer_new(bh)) {
1991			if (block_end > from && block_start < to) {
1992				if (!folio_test_uptodate(folio)) {
1993					size_t start, xend;
1994
1995					start = max(from, block_start);
1996					xend = min(to, block_end);
1997
1998					folio_zero_segment(folio, start, xend);
1999					set_buffer_uptodate(bh);
2000				}
2001
2002				clear_buffer_new(bh);
2003				mark_buffer_dirty(bh);
2004			}
2005		}
2006
2007		block_start = block_end;
2008		bh = bh->b_this_page;
2009	} while (bh != head);
2010}
2011EXPORT_SYMBOL(folio_zero_new_buffers);
2012
2013static int
2014iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2015		const struct iomap *iomap)
2016{
2017	loff_t offset = (loff_t)block << inode->i_blkbits;
2018
2019	bh->b_bdev = iomap->bdev;
2020
2021	/*
2022	 * The block points to the offset in the file we need to map; the iomap
2023	 * contains the offset at which the map starts. If the map ends before the
2024	 * current block, then do not map the buffer and let the caller
2025	 * handle it.
2026	 */
2027	if (offset >= iomap->offset + iomap->length)
2028		return -EIO;
2029
2030	switch (iomap->type) {
2031	case IOMAP_HOLE:
2032		/*
2033		 * If the buffer is not up to date or beyond the current EOF,
2034		 * we need to mark it as new to ensure sub-block zeroing is
2035		 * executed if necessary.
2036		 */
2037		if (!buffer_uptodate(bh) ||
2038		    (offset >= i_size_read(inode)))
2039			set_buffer_new(bh);
2040		return 0;
2041	case IOMAP_DELALLOC:
2042		if (!buffer_uptodate(bh) ||
2043		    (offset >= i_size_read(inode)))
2044			set_buffer_new(bh);
2045		set_buffer_uptodate(bh);
2046		set_buffer_mapped(bh);
2047		set_buffer_delay(bh);
2048		return 0;
2049	case IOMAP_UNWRITTEN:
2050		/*
2051		 * For unwritten regions, we always need to ensure that regions
2052		 * in the block we are not writing to are zeroed. Mark the
2053		 * buffer as new to ensure this.
2054		 */
2055		set_buffer_new(bh);
2056		set_buffer_unwritten(bh);
2057		fallthrough;
2058	case IOMAP_MAPPED:
2059		if ((iomap->flags & IOMAP_F_NEW) ||
2060		    offset >= i_size_read(inode)) {
2061			/*
2062			 * This can happen if truncating the block device races
2063			 * with the check in the caller as i_size updates on
2064			 * block devices aren't synchronized by i_rwsem for
2065			 * block devices.
2066			 */
2067			if (S_ISBLK(inode->i_mode))
2068				return -EIO;
2069			set_buffer_new(bh);
2070		}
2071		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2072				inode->i_blkbits;
2073		set_buffer_mapped(bh);
2074		return 0;
2075	default:
2076		WARN_ON_ONCE(1);
2077		return -EIO;
2078	}
2079}
2080
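/*
 * A worked example of the IOMAP_MAPPED arithmetic above; the helper and its
 * numbers are illustrative, not part of this file. With 4096-byte blocks
 * (blkbits == 12), a mapping with addr == 1 MiB covering file offset 0 puts
 * file offset 8192 at disk block (1048576 + 8192 - 0) >> 12 == 258.
 */
static sector_t example_iomap_blocknr(const struct iomap *iomap, loff_t offset,
				      unsigned int blkbits)
{
	return (iomap->addr + offset - iomap->offset) >> blkbits;
}
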
2081int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2082		get_block_t *get_block, const struct iomap *iomap)
2083{
2084	size_t from = offset_in_folio(folio, pos);
2085	size_t to = from + len;
2086	struct inode *inode = folio->mapping->host;
2087	size_t block_start, block_end;
2088	sector_t block;
2089	int err = 0;
2090	size_t blocksize;
2091	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2092
2093	BUG_ON(!folio_test_locked(folio));
2094	BUG_ON(to > folio_size(folio));
2095	BUG_ON(from > to);
2096
2097	head = folio_create_buffers(folio, inode, 0);
2098	blocksize = head->b_size;
2099	block = div_u64(folio_pos(folio), blocksize);
2100
2101	for (bh = head, block_start = 0; bh != head || !block_start;
2102	    block++, block_start = block_end, bh = bh->b_this_page) {
2103		block_end = block_start + blocksize;
2104		if (block_end <= from || block_start >= to) {
2105			if (folio_test_uptodate(folio)) {
2106				if (!buffer_uptodate(bh))
2107					set_buffer_uptodate(bh);
2108			}
2109			continue;
2110		}
2111		if (buffer_new(bh))
2112			clear_buffer_new(bh);
2113		if (!buffer_mapped(bh)) {
2114			WARN_ON(bh->b_size != blocksize);
2115			if (get_block)
2116				err = get_block(inode, block, bh, 1);
2117			else
2118				err = iomap_to_bh(inode, block, bh, iomap);
2119			if (err)
2120				break;
2121
2122			if (buffer_new(bh)) {
2123				clean_bdev_bh_alias(bh);
2124				if (folio_test_uptodate(folio)) {
2125					clear_buffer_new(bh);
2126					set_buffer_uptodate(bh);
2127					mark_buffer_dirty(bh);
2128					continue;
2129				}
2130				if (block_end > to || block_start < from)
2131					folio_zero_segments(folio,
2132						to, block_end,
2133						block_start, from);
2134				continue;
2135			}
2136		}
2137		if (folio_test_uptodate(folio)) {
2138			if (!buffer_uptodate(bh))
2139				set_buffer_uptodate(bh);
2140			continue;
2141		}
2142		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2143		    !buffer_unwritten(bh) &&
2144		     (block_start < from || block_end > to)) {
2145			bh_read_nowait(bh, 0);
2146			*wait_bh++ = bh;
2147		}
2148	}
2149	/*
2150	 * If we issued read requests - let them complete.
2151	 */
2152	while (wait_bh > wait) {
2153		wait_on_buffer(*--wait_bh);
2154		if (!buffer_uptodate(*wait_bh))
2155			err = -EIO;
2156	}
2157	if (unlikely(err))
2158		folio_zero_new_buffers(folio, from, to);
2159	return err;
2160}
2161
2162int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2163		get_block_t *get_block)
2164{
2165	return __block_write_begin_int(folio, pos, len, get_block, NULL);
2166}
2167EXPORT_SYMBOL(__block_write_begin);
2168
2169static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2170{
2171	size_t block_start, block_end;
2172	bool partial = false;
2173	unsigned blocksize;
2174	struct buffer_head *bh, *head;
2175
2176	bh = head = folio_buffers(folio);
2177	if (!bh)
2178		return;
2179	blocksize = bh->b_size;
2180
2181	block_start = 0;
2182	do {
2183		block_end = block_start + blocksize;
2184		if (block_end <= from || block_start >= to) {
2185			if (!buffer_uptodate(bh))
2186				partial = true;
2187		} else {
2188			set_buffer_uptodate(bh);
2189			mark_buffer_dirty(bh);
2190		}
2191		if (buffer_new(bh))
2192			clear_buffer_new(bh);
2193
2194		block_start = block_end;
2195		bh = bh->b_this_page;
2196	} while (bh != head);
2197
2198	/*
2199	 * If this is a partial write which happened to make all buffers
2200	 * uptodate then we can optimize away a bogus read_folio() for
2201	 * the next read(). Here we 'discover' whether the folio went
2202	 * uptodate as a result of this (potentially partial) write.
2203	 */
2204	if (!partial)
2205		folio_mark_uptodate(folio);
2206}
2207
2208/*
2209 * block_write_begin takes care of the basic task of block allocation and
2210 * bringing partial write blocks uptodate first.
2211 *
2212 * The filesystem needs to handle block truncation upon failure.
2213 */
2214int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2215		struct folio **foliop, get_block_t *get_block)
2216{
2217	pgoff_t index = pos >> PAGE_SHIFT;
2218	struct folio *folio;
2219	int status;
2220
2221	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2222			mapping_gfp_mask(mapping));
2223	if (IS_ERR(folio))
2224		return PTR_ERR(folio);
2225
2226	status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2227	if (unlikely(status)) {
2228		folio_unlock(folio);
2229		folio_put(folio);
2230		folio = NULL;
2231	}
2232
2233	*foliop = folio;
2234	return status;
2235}
2236EXPORT_SYMBOL(block_write_begin);
2237
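/*
 * A minimal sketch of how a filesystem might wire this up. Everything named
 * "examplefs_*" is hypothetical: the get_block callback below pretends file
 * block N always lives at disk block N, which only makes sense for a toy
 * flat layout; a real filesystem would consult its metadata (and allocate
 * when "create" is set).
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* 1:1 mapping, illustration only */
	return 0;
}

static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping, loff_t pos,
				 unsigned len, struct folio **foliop,
				 void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop,
				 examplefs_get_block);
}
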
2238int block_write_end(struct file *file, struct address_space *mapping,
2239			loff_t pos, unsigned len, unsigned copied,
2240			struct folio *folio, void *fsdata)
2241{
2242	size_t start = pos - folio_pos(folio);
2243
2244	if (unlikely(copied < len)) {
2245		/*
2246		 * The buffers that were written will now be uptodate, so
2247		 * we don't have to worry about a read_folio reading them
2248		 * and overwriting a partial write. However if we have
2249		 * encountered a short write and only partially written
2250		 * into a buffer, it will not be marked uptodate, so a
2251		 * read_folio might come in and destroy our partial write.
2252		 *
2253		 * Do the simplest thing, and just treat any short write to a
2254		 * non uptodate folio as a zero-length write, and force the
2255		 * caller to redo the whole thing.
2256		 */
2257		if (!folio_test_uptodate(folio))
2258			copied = 0;
2259
2260		folio_zero_new_buffers(folio, start+copied, start+len);
2261	}
2262	flush_dcache_folio(folio);
2263
2264	/* This could be a short (even 0-length) commit */
2265	__block_commit_write(folio, start, start + copied);
2266
2267	return copied;
2268}
2269EXPORT_SYMBOL(block_write_end);
2270
2271int generic_write_end(struct file *file, struct address_space *mapping,
2272			loff_t pos, unsigned len, unsigned copied,
2273			struct folio *folio, void *fsdata)
2274{
2275	struct inode *inode = mapping->host;
2276	loff_t old_size = inode->i_size;
2277	bool i_size_changed = false;
2278
2279	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
2280
2281	/*
2282	 * No need to use i_size_read() here, the i_size cannot change under us
2283	 * because we hold i_rwsem.
2284	 *
2285	 * But it's important to update i_size while still holding folio lock:
2286	 * page writeout could otherwise come in and zero beyond i_size.
2287	 */
2288	if (pos + copied > inode->i_size) {
2289		i_size_write(inode, pos + copied);
2290		i_size_changed = true;
2291	}
2292
2293	folio_unlock(folio);
2294	folio_put(folio);
2295
2296	if (old_size < pos)
2297		pagecache_isize_extended(inode, old_size, pos);
2298	/*
2299	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2300	 * makes the page lock hold time longer. Second, it forces lock
2301	 * ordering of page lock and transaction start for journaling
2302	 * filesystems.
2303	 */
2304	if (i_size_changed)
2305		mark_inode_dirty(inode);
2306	return copied;
2307}
2308EXPORT_SYMBOL(generic_write_end);
2309
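/*
 * Continuing the hypothetical examplefs sketch: block_write_begin() pairs
 * naturally with generic_write_end() in an address_space_operations table.
 * This is an illustrative fragment, not a complete or authoritative aops.
 */
static const struct address_space_operations examplefs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.write_begin	= examplefs_write_begin,
	.write_end	= generic_write_end,
};
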
2310/*
2311 * block_is_partially_uptodate checks whether buffers within a folio are
2312 * uptodate or not.
2313 *
2314 * Returns true if all buffers which correspond to the specified part
2315 * of the folio are uptodate.
2316 */
2317bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2318{
2319	unsigned block_start, block_end, blocksize;
2320	unsigned to;
2321	struct buffer_head *bh, *head;
2322	bool ret = true;
2323
2324	head = folio_buffers(folio);
2325	if (!head)
2326		return false;
2327	blocksize = head->b_size;
2328	to = min_t(unsigned, folio_size(folio) - from, count);
2329	to = from + to;
2330	if (from < blocksize && to > folio_size(folio) - blocksize)
2331		return false;
2332
2333	bh = head;
2334	block_start = 0;
2335	do {
2336		block_end = block_start + blocksize;
2337		if (block_end > from && block_start < to) {
2338			if (!buffer_uptodate(bh)) {
2339				ret = false;
2340				break;
2341			}
2342			if (block_end >= to)
2343				break;
2344		}
2345		block_start = block_end;
2346		bh = bh->b_this_page;
2347	} while (bh != head);
2348
2349	return ret;
2350}
2351EXPORT_SYMBOL(block_is_partially_uptodate);
2352
2353/*
2354 * Generic "read_folio" function for block devices that have the normal
2355 * get_block functionality. This covers most block-device filesystems.
2356 * Reads the folio asynchronously --- the unlock_buffer() and
2357 * set/clear_buffer_uptodate() functions propagate buffer state into the
2358 * folio once IO has completed.
2359 */
2360int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2361{
2362	struct inode *inode = folio->mapping->host;
2363	sector_t iblock, lblock;
2364	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2365	size_t blocksize;
2366	int nr, i;
2367	int fully_mapped = 1;
2368	bool page_error = false;
2369	loff_t limit = i_size_read(inode);
2370
2371	/* This is needed for ext4. */
2372	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2373		limit = inode->i_sb->s_maxbytes;
2374
2375	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2376
2377	head = folio_create_buffers(folio, inode, 0);
2378	blocksize = head->b_size;
2379
2380	iblock = div_u64(folio_pos(folio), blocksize);
2381	lblock = div_u64(limit + blocksize - 1, blocksize);
2382	bh = head;
2383	nr = 0;
2384	i = 0;
2385
2386	do {
2387		if (buffer_uptodate(bh))
2388			continue;
2389
2390		if (!buffer_mapped(bh)) {
2391			int err = 0;
2392
2393			fully_mapped = 0;
2394			if (iblock < lblock) {
2395				WARN_ON(bh->b_size != blocksize);
2396				err = get_block(inode, iblock, bh, 0);
2397				if (err)
2398					page_error = true;
2399			}
2400			if (!buffer_mapped(bh)) {
2401				folio_zero_range(folio, i * blocksize,
2402						blocksize);
2403				if (!err)
2404					set_buffer_uptodate(bh);
2405				continue;
2406			}
2407			/*
2408			 * get_block() might have updated the buffer
2409			 * synchronously
2410			 */
2411			if (buffer_uptodate(bh))
2412				continue;
2413		}
2414		arr[nr++] = bh;
2415	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2416
2417	if (fully_mapped)
2418		folio_set_mappedtodisk(folio);
2419
2420	if (!nr) {
2421		/*
2422		 * All buffers are uptodate or get_block() returned an
2423		 * error when trying to map them - we can finish the read.
2424		 */
2425		folio_end_read(folio, !page_error);
2426		return 0;
2427	}
2428
2429	/* Stage two: lock the buffers */
2430	for (i = 0; i < nr; i++) {
2431		bh = arr[i];
2432		lock_buffer(bh);
2433		mark_buffer_async_read(bh);
2434	}
2435
2436	/*
2437	 * Stage 3: start the IO.  Check for uptodateness
2438	 * inside the buffer lock in case another process reading
2439	 * the underlying blockdev brought it uptodate (the sct fix).
2440	 */
2441	for (i = 0; i < nr; i++) {
2442		bh = arr[i];
2443		if (buffer_uptodate(bh))
2444			end_buffer_async_read(bh, 1);
2445		else
2446			submit_bh(REQ_OP_READ, bh);
2447	}
2448	return 0;
2449}
2450EXPORT_SYMBOL(block_read_full_folio);
2451
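/*
 * The matching read side for the examplefs sketch: a ->read_folio that just
 * delegates to block_read_full_folio() with the filesystem's (hypothetical)
 * get_block callback.
 */
static int examplefs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, examplefs_get_block);
}
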
2452/* Utility function for filesystems that need to do work on expanding
2453 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2454 * deal with the hole.
2455 */
2456int generic_cont_expand_simple(struct inode *inode, loff_t size)
2457{
2458	struct address_space *mapping = inode->i_mapping;
2459	const struct address_space_operations *aops = mapping->a_ops;
2460	struct folio *folio;
2461	void *fsdata = NULL;
2462	int err;
2463
2464	err = inode_newsize_ok(inode, size);
2465	if (err)
2466		goto out;
2467
2468	err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2469	if (err)
2470		goto out;
2471
2472	err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2473	BUG_ON(err > 0);
2474
2475out:
2476	return err;
2477}
2478EXPORT_SYMBOL(generic_cont_expand_simple);
2479
2480static int cont_expand_zero(struct file *file, struct address_space *mapping,
2481			    loff_t pos, loff_t *bytes)
2482{
2483	struct inode *inode = mapping->host;
2484	const struct address_space_operations *aops = mapping->a_ops;
2485	unsigned int blocksize = i_blocksize(inode);
2486	struct folio *folio;
2487	void *fsdata = NULL;
2488	pgoff_t index, curidx;
2489	loff_t curpos;
2490	unsigned zerofrom, offset, len;
2491	int err = 0;
2492
2493	index = pos >> PAGE_SHIFT;
2494	offset = pos & ~PAGE_MASK;
2495
2496	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2497		zerofrom = curpos & ~PAGE_MASK;
2498		if (zerofrom & (blocksize-1)) {
2499			*bytes |= (blocksize-1);
2500			(*bytes)++;
2501		}
2502		len = PAGE_SIZE - zerofrom;
2503
2504		err = aops->write_begin(file, mapping, curpos, len,
2505					    &folio, &fsdata);
2506		if (err)
2507			goto out;
2508		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2509		err = aops->write_end(file, mapping, curpos, len, len,
2510						folio, fsdata);
2511		if (err < 0)
2512			goto out;
2513		BUG_ON(err != len);
2514		err = 0;
2515
2516		balance_dirty_pages_ratelimited(mapping);
2517
2518		if (fatal_signal_pending(current)) {
2519			err = -EINTR;
2520			goto out;
2521		}
2522	}
2523
2524	/* page covers the boundary, find the boundary offset */
2525	if (index == curidx) {
2526		zerofrom = curpos & ~PAGE_MASK;
2527		/* if we are expanding the file, the last block will be filled */
2528		if (offset <= zerofrom) {
2529			goto out;
2530		}
2531		if (zerofrom & (blocksize-1)) {
2532			*bytes |= (blocksize-1);
2533			(*bytes)++;
2534		}
2535		len = offset - zerofrom;
2536
2537		err = aops->write_begin(file, mapping, curpos, len,
2538					    &folio, &fsdata);
2539		if (err)
2540			goto out;
2541		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2542		err = aops->write_end(file, mapping, curpos, len, len,
2543						folio, fsdata);
2544		if (err < 0)
2545			goto out;
2546		BUG_ON(err != len);
2547		err = 0;
2548	}
2549out:
2550	return err;
2551}
2552
2553/*
2554 * For moronic filesystems that do not allow holes in files.
2555 * We may have to extend the file.
2556 */
2557int cont_write_begin(struct file *file, struct address_space *mapping,
2558			loff_t pos, unsigned len,
2559			struct folio **foliop, void **fsdata,
2560			get_block_t *get_block, loff_t *bytes)
2561{
2562	struct inode *inode = mapping->host;
2563	unsigned int blocksize = i_blocksize(inode);
2564	unsigned int zerofrom;
2565	int err;
2566
2567	err = cont_expand_zero(file, mapping, pos, bytes);
2568	if (err)
2569		return err;
2570
2571	zerofrom = *bytes & ~PAGE_MASK;
2572	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2573		*bytes |= (blocksize-1);
2574		(*bytes)++;
2575	}
2576
2577	return block_write_begin(mapping, pos, len, foliop, get_block);
2578}
2579EXPORT_SYMBOL(cont_write_begin);
2580
2581void block_commit_write(struct page *page, unsigned from, unsigned to)
2582{
2583	struct folio *folio = page_folio(page);
2584	__block_commit_write(folio, from, to);
2585}
2586EXPORT_SYMBOL(block_commit_write);
2587
2588/*
2589 * block_page_mkwrite() is not allowed to change the file size as it gets
2590 * called from a page fault handler when a page is first dirtied. Hence we must
2591 * be careful to check for EOF conditions here. We set the page up correctly
2592 * for a written page which means we get ENOSPC checking when writing into
2593 * holes and correct delalloc and unwritten extent mapping on filesystems that
2594 * support these features.
2595 *
2596 * We are not allowed to take the i_rwsem here so we have to play games to
2597 * protect against truncate races as the page could now be beyond EOF.  Because
2598 * truncate writes the inode size before removing pages, once we have the
2599 * page lock we can determine safely if the page is beyond EOF. If it is not
2600 * beyond EOF, then the page is guaranteed safe against truncation until we
2601 * unlock the page.
2602 *
2603 * Direct callers of this function should protect against filesystem freezing
2604 * using sb_start_pagefault() - sb_end_pagefault() functions.
2605 */
2606int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2607			 get_block_t get_block)
2608{
2609	struct folio *folio = page_folio(vmf->page);
2610	struct inode *inode = file_inode(vma->vm_file);
2611	unsigned long end;
2612	loff_t size;
2613	int ret;
2614
2615	folio_lock(folio);
2616	size = i_size_read(inode);
2617	if ((folio->mapping != inode->i_mapping) ||
2618	    (folio_pos(folio) >= size)) {
2619		/* We overload EFAULT to mean page got truncated */
2620		ret = -EFAULT;
2621		goto out_unlock;
2622	}
2623
2624	end = folio_size(folio);
2625	/* folio is wholly or partially inside EOF */
2626	if (folio_pos(folio) + end > size)
2627		end = size - folio_pos(folio);
2628
2629	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2630	if (unlikely(ret))
2631		goto out_unlock;
2632
2633	__block_commit_write(folio, 0, end);
2634
2635	folio_mark_dirty(folio);
2636	folio_wait_stable(folio);
2637	return 0;
2638out_unlock:
2639	folio_unlock(folio);
2640	return ret;
2641}
2642EXPORT_SYMBOL(block_page_mkwrite);
2643
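/*
 * A hedged sketch of a direct caller honouring the freeze-protection rule
 * stated above; examplefs_page_mkwrite() is hypothetical. The error-to-
 * vm_fault_t mapping below mirrors the common convention (-EFAULT from a
 * truncate race becomes VM_FAULT_NOPAGE; success leaves the folio locked).
 */
static vm_fault_t examplefs_page_mkwrite(struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
	int err;

	sb_start_pagefault(sb);
	err = block_page_mkwrite(vmf->vma, vmf, examplefs_get_block);
	sb_end_pagefault(sb);

	if (err == -EFAULT)		/* folio was truncated under us */
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_LOCKED;		/* folio stays locked and dirty */
}
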
2644int block_truncate_page(struct address_space *mapping,
2645			loff_t from, get_block_t *get_block)
2646{
2647	pgoff_t index = from >> PAGE_SHIFT;
2648	unsigned blocksize;
2649	sector_t iblock;
2650	size_t offset, length, pos;
2651	struct inode *inode = mapping->host;
2652	struct folio *folio;
2653	struct buffer_head *bh;
2654	int err = 0;
2655
2656	blocksize = i_blocksize(inode);
2657	length = from & (blocksize - 1);
2658
2659	/* Block boundary? Nothing to do */
2660	if (!length)
2661		return 0;
2662
2663	length = blocksize - length;
2664	iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2665
2666	folio = filemap_grab_folio(mapping, index);
2667	if (IS_ERR(folio))
2668		return PTR_ERR(folio);
2669
2670	bh = folio_buffers(folio);
2671	if (!bh)
2672		bh = create_empty_buffers(folio, blocksize, 0);
2673
2674	/* Find the buffer that contains "offset" */
2675	offset = offset_in_folio(folio, from);
2676	pos = blocksize;
2677	while (offset >= pos) {
2678		bh = bh->b_this_page;
2679		iblock++;
2680		pos += blocksize;
2681	}
2682
2683	if (!buffer_mapped(bh)) {
2684		WARN_ON(bh->b_size != blocksize);
2685		err = get_block(inode, iblock, bh, 0);
2686		if (err)
2687			goto unlock;
2688		/* unmapped? It's a hole - nothing to do */
2689		if (!buffer_mapped(bh))
2690			goto unlock;
2691	}
2692
2693	/* Ok, it's mapped. Make sure it's up-to-date */
2694	if (folio_test_uptodate(folio))
2695		set_buffer_uptodate(bh);
2696
2697	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2698		err = bh_read(bh, 0);
2699		/* Uhhuh. Read error. Complain and punt. */
2700		if (err < 0)
2701			goto unlock;
2702	}
2703
2704	folio_zero_range(folio, offset, length);
2705	mark_buffer_dirty(bh);
2706
2707unlock:
2708	folio_unlock(folio);
2709	folio_put(folio);
2710
2711	return err;
2712}
2713EXPORT_SYMBOL(block_truncate_page);
2714
2715/*
2716 * The generic ->writepage function for buffer-backed address_spaces
2717 */
2718int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2719		void *get_block)
2720{
2721	struct inode * const inode = folio->mapping->host;
2722	loff_t i_size = i_size_read(inode);
2723
2724	/* Is the folio fully inside i_size? */
2725	if (folio_pos(folio) + folio_size(folio) <= i_size)
2726		return __block_write_full_folio(inode, folio, get_block, wbc);
2727
2728	/* Is the folio fully outside i_size? (truncate in progress) */
2729	if (folio_pos(folio) >= i_size) {
2730		folio_unlock(folio);
2731		return 0; /* don't care */
2732	}
2733
2734	/*
2735	 * The folio straddles i_size.  It must be zeroed out on each and every
2736	 * writepage invocation because it may be mmapped.  "A file is mapped
2737	 * in multiples of the page size.  For a file that is not a multiple of
2738	 * the page size, the remaining memory is zeroed when mapped, and
2739	 * writes to that region are not written out to the file."
2740	 */
2741	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2742			folio_size(folio));
2743	return __block_write_full_folio(inode, folio, get_block, wbc);
2744}
2745
2746sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2747			    get_block_t *get_block)
2748{
2749	struct inode *inode = mapping->host;
2750	struct buffer_head tmp = {
2751		.b_size = i_blocksize(inode),
2752	};
2753
2754	get_block(inode, block, &tmp, 0);
2755	return tmp.b_blocknr;
2756}
2757EXPORT_SYMBOL(generic_block_bmap);
2758
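/*
 * Illustrative only: a filesystem exposing FIBMAP via ->bmap can simply
 * forward to generic_block_bmap() with its (hypothetical) get_block callback.
 */
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}
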
2759static void end_bio_bh_io_sync(struct bio *bio)
2760{
2761	struct buffer_head *bh = bio->bi_private;
2762
2763	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2764		set_bit(BH_Quiet, &bh->b_state);
2765
2766	bh->b_end_io(bh, !bio->bi_status);
2767	bio_put(bio);
2768}
2769
2770static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2771			  enum rw_hint write_hint,
2772			  struct writeback_control *wbc)
2773{
2774	const enum req_op op = opf & REQ_OP_MASK;
2775	struct bio *bio;
2776
2777	BUG_ON(!buffer_locked(bh));
2778	BUG_ON(!buffer_mapped(bh));
2779	BUG_ON(!bh->b_end_io);
2780	BUG_ON(buffer_delay(bh));
2781	BUG_ON(buffer_unwritten(bh));
2782
2783	/*
2784	 * Only clear out a write error when rewriting
2785	 */
2786	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2787		clear_buffer_write_io_error(bh);
2788
2789	if (buffer_meta(bh))
2790		opf |= REQ_META;
2791	if (buffer_prio(bh))
2792		opf |= REQ_PRIO;
2793
2794	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2795
2796	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2797
2798	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2799	bio->bi_write_hint = write_hint;
2800
2801	bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2802
2803	bio->bi_end_io = end_bio_bh_io_sync;
2804	bio->bi_private = bh;
2805
2806	/* Take care of bh's that straddle the end of the device */
2807	guard_bio_eod(bio);
2808
2809	if (wbc) {
2810		wbc_init_bio(wbc, bio);
2811		wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2812	}
2813
2814	submit_bio(bio);
2815}
2816
2817void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2818{
2819	submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2820}
2821EXPORT_SYMBOL(submit_bh);
2822
2823void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2824{
2825	lock_buffer(bh);
2826	if (!test_clear_buffer_dirty(bh)) {
2827		unlock_buffer(bh);
2828		return;
2829	}
2830	bh->b_end_io = end_buffer_write_sync;
2831	get_bh(bh);
2832	submit_bh(REQ_OP_WRITE | op_flags, bh);
2833}
2834EXPORT_SYMBOL(write_dirty_buffer);
2835
2836/*
2837 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2838 * and then start new I/O and then wait upon it.  The caller must have a ref on
2839 * the buffer_head.
2840 */
2841int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2842{
2843	WARN_ON(atomic_read(&bh->b_count) < 1);
2844	lock_buffer(bh);
2845	if (test_clear_buffer_dirty(bh)) {
2846		/*
2847		 * The bh should be mapped, but it might not be if the
2848		 * device was hot-removed. Not much we can do but fail the I/O.
2849		 */
2850		if (!buffer_mapped(bh)) {
2851			unlock_buffer(bh);
2852			return -EIO;
2853		}
2854
2855		get_bh(bh);
2856		bh->b_end_io = end_buffer_write_sync;
2857		submit_bh(REQ_OP_WRITE | op_flags, bh);
2858		wait_on_buffer(bh);
2859		if (!buffer_uptodate(bh))
2860			return -EIO;
2861	} else {
2862		unlock_buffer(bh);
2863	}
2864	return 0;
2865}
2866EXPORT_SYMBOL(__sync_dirty_buffer);
2867
2868int sync_dirty_buffer(struct buffer_head *bh)
2869{
2870	return __sync_dirty_buffer(bh, REQ_SYNC);
2871}
2872EXPORT_SYMBOL(sync_dirty_buffer);
2873
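/*
 * A hedged sketch (hypothetical caller, not from this file) of the classic
 * metadata update pattern these helpers serve: read the block, modify it,
 * mark it dirty, then force it out with data-integrity semantics.
 */
static int example_update_block(struct block_device *bdev, sector_t blocknr,
				unsigned int size)
{
	struct buffer_head *bh = __bread(bdev, blocknr, size);
	int err;

	if (!bh)
		return -EIO;
	memset(bh->b_data, 0, size);	/* stand-in for a real modification */
	mark_buffer_dirty(bh);		/* hand the block to writeback... */
	err = sync_dirty_buffer(bh);	/* ...or push it out immediately */
	brelse(bh);
	return err;
}
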
2874static inline int buffer_busy(struct buffer_head *bh)
2875{
2876	return atomic_read(&bh->b_count) |
2877		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2878}
2879
2880static bool
2881drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2882{
2883	struct buffer_head *head = folio_buffers(folio);
2884	struct buffer_head *bh;
2885
2886	bh = head;
2887	do {
2888		if (buffer_busy(bh))
2889			goto failed;
2890		bh = bh->b_this_page;
2891	} while (bh != head);
2892
2893	do {
2894		struct buffer_head *next = bh->b_this_page;
2895
2896		if (bh->b_assoc_map)
2897			__remove_assoc_queue(bh);
2898		bh = next;
2899	} while (bh != head);
2900	*buffers_to_free = head;
2901	folio_detach_private(folio);
2902	return true;
2903failed:
2904	return false;
2905}
2906
2907/**
2908 * try_to_free_buffers - Release buffers attached to this folio.
2909 * @folio: The folio.
2910 *
2911 * If any buffers are in use (dirty, under writeback, elevated refcount),
2912 * no buffers will be freed.
2913 *
2914 * If the folio is dirty but all the buffers are clean then we need to
2915 * be sure to mark the folio clean as well.  This is because the folio
2916 * may be against a block device, and a later reattachment of buffers
2917 * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2918 * filesystem data on the same device.
2919 *
2920 * The same applies to regular filesystem folios: if all the buffers are
2921 * clean then we set the folio clean and proceed.  To do that, we require
2922 * total exclusion from block_dirty_folio().  That is obtained with
2923 * i_private_lock.
2924 *
2925 * Exclusion against try_to_free_buffers may be obtained by either
2926 * locking the folio or by holding its mapping's i_private_lock.
2927 *
2928 * Context: Process context.  @folio must be locked.  Will not sleep.
2929 * Return: true if all buffers attached to this folio were freed.
2930 */
2931bool try_to_free_buffers(struct folio *folio)
2932{
2933	struct address_space * const mapping = folio->mapping;
2934	struct buffer_head *buffers_to_free = NULL;
2935	bool ret = false;
2936
2937	BUG_ON(!folio_test_locked(folio));
2938	if (folio_test_writeback(folio))
2939		return false;
2940
2941	if (mapping == NULL) {		/* can this still happen? */
2942		ret = drop_buffers(folio, &buffers_to_free);
2943		goto out;
2944	}
2945
2946	spin_lock(&mapping->i_private_lock);
2947	ret = drop_buffers(folio, &buffers_to_free);
2948
2949	/*
2950	 * If the filesystem writes its buffers by hand (eg ext3)
2951	 * then we can have clean buffers against a dirty folio.  We
2952	 * clean the folio here; otherwise the VM will never notice
2953	 * that the filesystem did any IO at all.
2954	 *
2955	 * Also, during truncate, discard_buffer will have marked all
2956	 * the folio's buffers clean.  We discover that here and clean
2957	 * the folio also.
2958	 *
2959	 * i_private_lock must be held over this entire operation in order
2960	 * to synchronise against block_dirty_folio and prevent the
2961	 * dirty bit from being lost.
2962	 */
2963	if (ret)
2964		folio_cancel_dirty(folio);
2965	spin_unlock(&mapping->i_private_lock);
2966out:
2967	if (buffers_to_free) {
2968		struct buffer_head *bh = buffers_to_free;
2969
2970		do {
2971			struct buffer_head *next = bh->b_this_page;
2972			free_buffer_head(bh);
2973			bh = next;
2974		} while (bh != buffers_to_free);
2975	}
2976	return ret;
2977}
2978EXPORT_SYMBOL(try_to_free_buffers);
2979
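/*
 * Illustrative wiring: a buffer-backed filesystem's ->release_folio can
 * usually delegate straight to try_to_free_buffers(). Hypothetical name.
 */
static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}
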
2980/*
2981 * Buffer-head allocation
2982 */
2983static struct kmem_cache *bh_cachep __ro_after_init;
2984
2985/*
2986 * Once the number of bh's in the machine exceeds this level, we start
2987 * stripping them in writeback.
2988 */
2989static unsigned long max_buffer_heads __ro_after_init;
2990
2991int buffer_heads_over_limit;
2992
2993struct bh_accounting {
2994	int nr;			/* Number of live bh's */
2995	int ratelimit;		/* Limit cacheline bouncing */
2996};
2997
2998static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2999
3000static void recalc_bh_state(void)
3001{
3002	int i;
3003	int tot = 0;
3004
3005	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3006		return;
3007	__this_cpu_write(bh_accounting.ratelimit, 0);
3008	for_each_online_cpu(i)
3009		tot += per_cpu(bh_accounting, i).nr;
3010	buffer_heads_over_limit = (tot > max_buffer_heads);
3011}
3012
3013struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3014{
3015	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3016	if (ret) {
3017		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3018		spin_lock_init(&ret->b_uptodate_lock);
3019		preempt_disable();
3020		__this_cpu_inc(bh_accounting.nr);
3021		recalc_bh_state();
3022		preempt_enable();
3023	}
3024	return ret;
3025}
3026EXPORT_SYMBOL(alloc_buffer_head);
3027
3028void free_buffer_head(struct buffer_head *bh)
3029{
3030	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3031	kmem_cache_free(bh_cachep, bh);
3032	preempt_disable();
3033	__this_cpu_dec(bh_accounting.nr);
3034	recalc_bh_state();
3035	preempt_enable();
3036}
3037EXPORT_SYMBOL(free_buffer_head);
3038
3039static int buffer_exit_cpu_dead(unsigned int cpu)
3040{
3041	int i;
3042	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3043
3044	for (i = 0; i < BH_LRU_SIZE; i++) {
3045		brelse(b->bhs[i]);
3046		b->bhs[i] = NULL;
3047	}
3048	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3049	per_cpu(bh_accounting, cpu).nr = 0;
3050	return 0;
3051}
3052
3053/**
3054 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3055 * @bh: struct buffer_head
3056 *
3057 * Return true if the buffer is up-to-date and false,
3058 * with the buffer locked, if not.
3059 */
3060int bh_uptodate_or_lock(struct buffer_head *bh)
3061{
3062	if (!buffer_uptodate(bh)) {
3063		lock_buffer(bh);
3064		if (!buffer_uptodate(bh))
3065			return 0;
3066		unlock_buffer(bh);
3067	}
3068	return 1;
3069}
3070EXPORT_SYMBOL(bh_uptodate_or_lock);
3071
3072/**
3073 * __bh_read - Submit read for a locked buffer
3074 * @bh: struct buffer_head
3075 * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3076 * @wait: wait until the read finishes
3077 *
3078 * Returns zero on success (or when not waiting), and -EIO on error.
3079 */
3080int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3081{
3082	int ret = 0;
3083
3084	BUG_ON(!buffer_locked(bh));
3085
3086	get_bh(bh);
3087	bh->b_end_io = end_buffer_read_sync;
3088	submit_bh(REQ_OP_READ | op_flags, bh);
3089	if (wait) {
3090		wait_on_buffer(bh);
3091		if (!buffer_uptodate(bh))
3092			ret = -EIO;
3093	}
3094	return ret;
3095}
3096EXPORT_SYMBOL(__bh_read);
3097
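/*
 * For orientation: the common entry point is the bh_read() inline from
 * <linux/buffer_head.h>, which composes bh_uptodate_or_lock() with
 * __bh_read() roughly as sketched below (returning 1 when no I/O was
 * needed, 0 on a successful read, -EIO on failure).
 */
static int example_bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;	/* already uptodate, nothing submitted */
	return __bh_read(bh, op_flags, true);
}
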
3098/**
3099 * __bh_read_batch - Submit read for a batch of unlocked buffers
3100 * @nr: number of entries in the buffer batch
3101 * @bhs: a batch of struct buffer_head
3102 * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
3103 * @force_lock: if set, block until each buffer lock is acquired; otherwise
3104 *              skip any buffer that cannot be locked.
3105 *
3106 * Returns nothing; reads complete asynchronously via end_buffer_read_sync().
3107 */
3108void __bh_read_batch(int nr, struct buffer_head *bhs[],
3109		     blk_opf_t op_flags, bool force_lock)
3110{
3111	int i;
3112
3113	for (i = 0; i < nr; i++) {
3114		struct buffer_head *bh = bhs[i];
3115
3116		if (buffer_uptodate(bh))
3117			continue;
3118
3119		if (force_lock)
3120			lock_buffer(bh);
3121		else
3122			if (!trylock_buffer(bh))
3123				continue;
3124
3125		if (buffer_uptodate(bh)) {
3126			unlock_buffer(bh);
3127			continue;
3128		}
3129
3130		bh->b_end_io = end_buffer_read_sync;
3131		get_bh(bh);
3132		submit_bh(REQ_OP_READ | op_flags, bh);
3133	}
3134}
3135EXPORT_SYMBOL(__bh_read_batch);
3136
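/*
 * A hedged usage sketch mirroring the bh_readahead_batch() inline from
 * <linux/buffer_head.h>: opportunistic readahead that quietly skips any
 * buffer which is already uptodate or cannot be trylocked.
 */
static void example_readahead_batch(struct buffer_head *bhs[], int nr)
{
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
}
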
3137void __init buffer_init(void)
3138{
3139	unsigned long nrpages;
3140	int ret;
3141
3142	bh_cachep = KMEM_CACHE(buffer_head,
3143				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3144	/*
3145	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3146	 */
3147	nrpages = (nr_free_buffer_pages() * 10) / 100;
3148	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3149	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3150					NULL, buffer_exit_cpu_dead);
3151	WARN_ON(ret < 0);
3152}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/fs/buffer.c
   4 *
   5 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
   6 */
   7
   8/*
   9 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
  10 *
  11 * Removed a lot of unnecessary code and simplified things now that
  12 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
  13 *
  14 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
  15 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
  16 *
  17 * Added 32k buffer block sizes - these are required older ARM systems. - RMK
  18 *
  19 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
  20 */
  21
  22#include <linux/kernel.h>
  23#include <linux/sched/signal.h>
  24#include <linux/syscalls.h>
  25#include <linux/fs.h>
  26#include <linux/iomap.h>
  27#include <linux/mm.h>
  28#include <linux/percpu.h>
  29#include <linux/slab.h>
  30#include <linux/capability.h>
  31#include <linux/blkdev.h>
  32#include <linux/file.h>
  33#include <linux/quotaops.h>
  34#include <linux/highmem.h>
  35#include <linux/export.h>
  36#include <linux/backing-dev.h>
  37#include <linux/writeback.h>
  38#include <linux/hash.h>
  39#include <linux/suspend.h>
  40#include <linux/buffer_head.h>
  41#include <linux/task_io_accounting_ops.h>
  42#include <linux/bio.h>
  43#include <linux/cpu.h>
  44#include <linux/bitops.h>
  45#include <linux/mpage.h>
  46#include <linux/bit_spinlock.h>
  47#include <linux/pagevec.h>
  48#include <linux/sched/mm.h>
  49#include <trace/events/block.h>
  50#include <linux/fscrypt.h>
 
 
  51
  52#include "internal.h"
  53
  54static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
  55static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
  56			  struct writeback_control *wbc);
  57
  58#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
  59
  60inline void touch_buffer(struct buffer_head *bh)
  61{
  62	trace_block_touch_buffer(bh);
  63	mark_page_accessed(bh->b_page);
  64}
  65EXPORT_SYMBOL(touch_buffer);
  66
  67void __lock_buffer(struct buffer_head *bh)
  68{
  69	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
  70}
  71EXPORT_SYMBOL(__lock_buffer);
  72
  73void unlock_buffer(struct buffer_head *bh)
  74{
  75	clear_bit_unlock(BH_Lock, &bh->b_state);
  76	smp_mb__after_atomic();
  77	wake_up_bit(&bh->b_state, BH_Lock);
  78}
  79EXPORT_SYMBOL(unlock_buffer);
  80
  81/*
  82 * Returns if the folio has dirty or writeback buffers. If all the buffers
  83 * are unlocked and clean then the folio_test_dirty information is stale. If
  84 * any of the buffers are locked, it is assumed they are locked for IO.
  85 */
  86void buffer_check_dirty_writeback(struct folio *folio,
  87				     bool *dirty, bool *writeback)
  88{
  89	struct buffer_head *head, *bh;
  90	*dirty = false;
  91	*writeback = false;
  92
  93	BUG_ON(!folio_test_locked(folio));
  94
  95	head = folio_buffers(folio);
  96	if (!head)
  97		return;
  98
  99	if (folio_test_writeback(folio))
 100		*writeback = true;
 101
 102	bh = head;
 103	do {
 104		if (buffer_locked(bh))
 105			*writeback = true;
 106
 107		if (buffer_dirty(bh))
 108			*dirty = true;
 109
 110		bh = bh->b_this_page;
 111	} while (bh != head);
 112}
 113EXPORT_SYMBOL(buffer_check_dirty_writeback);
 114
 115/*
 116 * Block until a buffer comes unlocked.  This doesn't stop it
 117 * from becoming locked again - you have to lock it yourself
 118 * if you want to preserve its state.
 119 */
 120void __wait_on_buffer(struct buffer_head * bh)
 121{
 122	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
 123}
 124EXPORT_SYMBOL(__wait_on_buffer);
 125
 126static void buffer_io_error(struct buffer_head *bh, char *msg)
 127{
 128	if (!test_bit(BH_Quiet, &bh->b_state))
 129		printk_ratelimited(KERN_ERR
 130			"Buffer I/O error on dev %pg, logical block %llu%s\n",
 131			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
 132}
 133
 134/*
 135 * End-of-IO handler helper function which does not touch the bh after
 136 * unlocking it.
 137 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 138 * a race there is benign: unlock_buffer() only use the bh's address for
 139 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 140 * itself.
 141 */
 142static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 143{
 144	if (uptodate) {
 145		set_buffer_uptodate(bh);
 146	} else {
 147		/* This happens, due to failed read-ahead attempts. */
 148		clear_buffer_uptodate(bh);
 149	}
 150	unlock_buffer(bh);
 151}
 152
 153/*
 154 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 155 * unlock the buffer.
 156 */
 157void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 158{
 159	__end_buffer_read_notouch(bh, uptodate);
 160	put_bh(bh);
 161}
 162EXPORT_SYMBOL(end_buffer_read_sync);
 163
 164void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 165{
 166	if (uptodate) {
 167		set_buffer_uptodate(bh);
 168	} else {
 169		buffer_io_error(bh, ", lost sync page write");
 170		mark_buffer_write_io_error(bh);
 171		clear_buffer_uptodate(bh);
 172	}
 173	unlock_buffer(bh);
 174	put_bh(bh);
 175}
 176EXPORT_SYMBOL(end_buffer_write_sync);
 177
 178/*
 179 * Various filesystems appear to want __find_get_block to be non-blocking.
 180 * But it's the page lock which protects the buffers.  To get around this,
 181 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 182 * private_lock.
 183 *
 184 * Hack idea: for the blockdev mapping, private_lock contention
 185 * may be quite high.  This code could TryLock the page, and if that
 186 * succeeds, there is no need to take private_lock.
 187 */
 188static struct buffer_head *
 189__find_get_block_slow(struct block_device *bdev, sector_t block)
 190{
 191	struct inode *bd_inode = bdev->bd_inode;
 192	struct address_space *bd_mapping = bd_inode->i_mapping;
 193	struct buffer_head *ret = NULL;
 194	pgoff_t index;
 195	struct buffer_head *bh;
 196	struct buffer_head *head;
 197	struct page *page;
 198	int all_mapped = 1;
 199	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 200
 201	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
 202	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
 203	if (!page)
 204		goto out;
 205
 206	spin_lock(&bd_mapping->private_lock);
 207	if (!page_has_buffers(page))
 
 208		goto out_unlock;
 209	head = page_buffers(page);
 210	bh = head;
 211	do {
 212		if (!buffer_mapped(bh))
 213			all_mapped = 0;
 214		else if (bh->b_blocknr == block) {
 215			ret = bh;
 216			get_bh(bh);
 217			goto out_unlock;
 218		}
 219		bh = bh->b_this_page;
 220	} while (bh != head);
 221
 222	/* we might be here because some of the buffers on this page are
 223	 * not mapped.  This is due to various races between
 224	 * file io on the block device and getblk.  It gets dealt with
 225	 * elsewhere, don't buffer_error if we had some unmapped buffers
 226	 */
 227	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
 228	if (all_mapped && __ratelimit(&last_warned)) {
 229		printk("__find_get_block_slow() failed. block=%llu, "
 230		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
 231		       "device %pg blocksize: %d\n",
 232		       (unsigned long long)block,
 233		       (unsigned long long)bh->b_blocknr,
 234		       bh->b_state, bh->b_size, bdev,
 235		       1 << bd_inode->i_blkbits);
 236	}
 237out_unlock:
 238	spin_unlock(&bd_mapping->private_lock);
 239	put_page(page);
 240out:
 241	return ret;
 242}
 243
 244static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 245{
 246	unsigned long flags;
 247	struct buffer_head *first;
 248	struct buffer_head *tmp;
 249	struct page *page;
 250	int page_uptodate = 1;
 251
 252	BUG_ON(!buffer_async_read(bh));
 253
 254	page = bh->b_page;
 255	if (uptodate) {
 256		set_buffer_uptodate(bh);
 257	} else {
 258		clear_buffer_uptodate(bh);
 259		buffer_io_error(bh, ", async page read");
 260		SetPageError(page);
 261	}
 262
 263	/*
 264	 * Be _very_ careful from here on. Bad things can happen if
 265	 * two buffer heads end IO at almost the same time and both
 266	 * decide that the page is now completely done.
 267	 */
 268	first = page_buffers(page);
 269	spin_lock_irqsave(&first->b_uptodate_lock, flags);
 270	clear_buffer_async_read(bh);
 271	unlock_buffer(bh);
 272	tmp = bh;
 273	do {
 274		if (!buffer_uptodate(tmp))
 275			page_uptodate = 0;
 276		if (buffer_async_read(tmp)) {
 277			BUG_ON(!buffer_locked(tmp));
 278			goto still_busy;
 279		}
 280		tmp = tmp->b_this_page;
 281	} while (tmp != bh);
 282	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 283
 284	/*
 285	 * If all of the buffers are uptodate then we can set the page
 286	 * uptodate.
 287	 */
 288	if (page_uptodate)
 289		SetPageUptodate(page);
 290	unlock_page(page);
 291	return;
 292
 293still_busy:
 294	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 295	return;
 296}
 297
 298struct decrypt_bh_ctx {
 299	struct work_struct work;
 300	struct buffer_head *bh;
 301};
 302
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 303static void decrypt_bh(struct work_struct *work)
 304{
 305	struct decrypt_bh_ctx *ctx =
 306		container_of(work, struct decrypt_bh_ctx, work);
 307	struct buffer_head *bh = ctx->bh;
 308	int err;
 309
 310	err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
 311					       bh_offset(bh));
 
 
 
 
 
 
 
 
 
 
 312	end_buffer_async_read(bh, err == 0);
 313	kfree(ctx);
 314}
 315
 316/*
 317 * I/O completion handler for block_read_full_folio() - pages
 318 * which come unlocked at the end of I/O.
 319 */
 320static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 321{
 322	/* Decrypt if needed */
 323	if (uptodate &&
 324	    fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
 325		struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
 
 
 
 
 326
 327		if (ctx) {
 328			INIT_WORK(&ctx->work, decrypt_bh);
 329			ctx->bh = bh;
 330			fscrypt_enqueue_decrypt_work(&ctx->work);
 
 
 
 
 
 
 331			return;
 332		}
 333		uptodate = 0;
 334	}
 335	end_buffer_async_read(bh, uptodate);
 336}
 337
 338/*
 339 * Completion handler for block_write_full_page() - pages which are unlocked
 340 * during I/O, and which have PageWriteback cleared upon I/O completion.
 341 */
 342void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 343{
 344	unsigned long flags;
 345	struct buffer_head *first;
 346	struct buffer_head *tmp;
 347	struct page *page;
 348
 349	BUG_ON(!buffer_async_write(bh));
 350
 351	page = bh->b_page;
 352	if (uptodate) {
 353		set_buffer_uptodate(bh);
 354	} else {
 355		buffer_io_error(bh, ", lost async page write");
 356		mark_buffer_write_io_error(bh);
 357		clear_buffer_uptodate(bh);
 358		SetPageError(page);
 359	}
 360
 361	first = page_buffers(page);
 362	spin_lock_irqsave(&first->b_uptodate_lock, flags);
 363
 364	clear_buffer_async_write(bh);
 365	unlock_buffer(bh);
 366	tmp = bh->b_this_page;
 367	while (tmp != bh) {
 368		if (buffer_async_write(tmp)) {
 369			BUG_ON(!buffer_locked(tmp));
 370			goto still_busy;
 371		}
 372		tmp = tmp->b_this_page;
 373	}
 374	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 375	end_page_writeback(page);
 376	return;
 377
 378still_busy:
 379	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 380	return;
 381}
 382EXPORT_SYMBOL(end_buffer_async_write);
 383
 384/*
 385 * If a page's buffers are under async readin (end_buffer_async_read
 386 * completion) then there is a possibility that another thread of
 387 * control could lock one of the buffers after it has completed
 388 * but while some of the other buffers have not completed.  This
 389 * locked buffer would confuse end_buffer_async_read() into not unlocking
 390 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 391 * that this buffer is not under async I/O.
 392 *
 393 * The page comes unlocked when it has no locked buffer_async buffers
 394 * left.
 395 *
 396 * PageLocked prevents anyone starting new async I/O reads any of
 397 * the buffers.
 398 *
 399 * PageWriteback is used to prevent simultaneous writeout of the same
 400 * page.
 401 *
 402 * PageLocked prevents anyone from starting writeback of a page which is
 403 * under read I/O (PageWriteback is only ever set against a locked page).
 404 */
 405static void mark_buffer_async_read(struct buffer_head *bh)
 406{
 407	bh->b_end_io = end_buffer_async_read_io;
 408	set_buffer_async_read(bh);
 409}
 410
 411static void mark_buffer_async_write_endio(struct buffer_head *bh,
 412					  bh_end_io_t *handler)
 413{
 414	bh->b_end_io = handler;
 415	set_buffer_async_write(bh);
 416}
 417
 418void mark_buffer_async_write(struct buffer_head *bh)
 419{
 420	mark_buffer_async_write_endio(bh, end_buffer_async_write);
 421}
 422EXPORT_SYMBOL(mark_buffer_async_write);
 423
 424
 425/*
 426 * fs/buffer.c contains helper functions for buffer-backed address space's
 427 * fsync functions.  A common requirement for buffer-based filesystems is
 428 * that certain data from the backing blockdev needs to be written out for
 429 * a successful fsync().  For example, ext2 indirect blocks need to be
 430 * written back and waited upon before fsync() returns.
 431 *
 432 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 433 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 434 * management of a list of dependent buffers at ->i_mapping->private_list.
 435 *
 436 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 437 * from their controlling inode's queue when they are being freed.  But
 438 * try_to_free_buffers() will be operating against the *blockdev* mapping
 439 * at the time, not against the S_ISREG file which depends on those buffers.
 440 * So the locking for private_list is via the private_lock in the address_space
 441 * which backs the buffers.  Which is different from the address_space 
 442 * against which the buffers are listed.  So for a particular address_space,
 443 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 444 * mapping->private_list will always be protected by the backing blockdev's
 445 * ->private_lock.
 446 *
 447 * Which introduces a requirement: all buffers on an address_space's
 448 * ->private_list must be from the same address_space: the blockdev's.
 449 *
 450 * address_spaces which do not place buffers at ->private_list via these
 451 * utility functions are free to use private_lock and private_list for
 452 * whatever they want.  The only requirement is that list_empty(private_list)
 453 * be true at clear_inode() time.
 454 *
 455 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 456 * filesystems should do that.  invalidate_inode_buffers() should just go
 457 * BUG_ON(!list_empty).
 458 *
 459 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 460 * take an address_space, not an inode.  And it should be called
 461 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 462 * queued up.
 463 *
 464 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 465 * list if it is already on a list.  Because if the buffer is on a list,
 466 * it *must* already be on the right one.  If not, the filesystem is being
 467 * silly.  This will save a ton of locking.  But first we have to ensure
 468 * that buffers are taken *off* the old inode's list when they are freed
 469 * (presumably in truncate).  That requires careful auditing of all
 470 * filesystems (do it inside bforget()).  It could also be done by bringing
 471 * b_inode back.
 472 */
 473
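    /*
     * Illustrative sketch (not real kernel code, names assumed from the
     * comment above): to walk a file's ->private_list safely, take the
     * private_lock of the *backing* blockdev mapping, as the helpers
     * below do:
     *
     *	struct address_space *buffer_mapping = mapping->private_data;
     *
     *	spin_lock(&buffer_mapping->private_lock);
     *	list_for_each_entry(bh, &mapping->private_list, b_assoc_buffers)
     *		...;
     *	spin_unlock(&buffer_mapping->private_lock);
     */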
 474/*
 475 * The buffer's backing address_space's private_lock must be held
 476 */
 477static void __remove_assoc_queue(struct buffer_head *bh)
 478{
 479	list_del_init(&bh->b_assoc_buffers);
 480	WARN_ON(!bh->b_assoc_map);
 481	bh->b_assoc_map = NULL;
 482}
 483
 484int inode_has_buffers(struct inode *inode)
 485{
 486	return !list_empty(&inode->i_data.private_list);
 487}
 488
 489/*
 490 * osync is designed to support O_SYNC io.  It waits synchronously for
 491 * all already-submitted IO to complete, but does not queue any new
 492 * writes to the disk.
 493 *
 494 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 495 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 496 * completion.  Any other dirty buffers which are not yet queued for
 497 * write will not be flushed to disk by the osync.
 498 */
 499static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
 500{
 501	struct buffer_head *bh;
 502	struct list_head *p;
 503	int err = 0;
 504
 505	spin_lock(lock);
 506repeat:
 507	list_for_each_prev(p, list) {
 508		bh = BH_ENTRY(p);
 509		if (buffer_locked(bh)) {
 510			get_bh(bh);
 511			spin_unlock(lock);
 512			wait_on_buffer(bh);
 513			if (!buffer_uptodate(bh))
 514				err = -EIO;
 515			brelse(bh);
 516			spin_lock(lock);
 517			goto repeat;
 518		}
 519	}
 520	spin_unlock(lock);
 521	return err;
 522}
 523
 524void emergency_thaw_bdev(struct super_block *sb)
 525{
 526	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
 527		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
 528}
 529
 530/**
 531 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 532 * @mapping: the mapping which wants those buffers written
 533 *
 534 * Starts I/O against the buffers at mapping->private_list, and waits upon
 535 * that I/O.
 536 *
 537 * Basically, this is a convenience function for fsync().
 538 * @mapping is a file or directory which needs those buffers to be written for
 539 * a successful fsync().
 540 */
 541int sync_mapping_buffers(struct address_space *mapping)
 542{
 543	struct address_space *buffer_mapping = mapping->private_data;
 544
 545	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
 546		return 0;
 547
 548	return fsync_buffers_list(&buffer_mapping->private_lock,
 549					&mapping->private_list);
 550}
 551EXPORT_SYMBOL(sync_mapping_buffers);
 552
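    /*
     * Example (an illustrative sketch only): a simple filesystem's
     * ->fsync() can combine data writeback with sync_mapping_buffers()
     * to flush the associated metadata buffers; "myfs" is a made-up name:
     *
     *	int myfs_fsync(struct file *file, loff_t start, loff_t end,
     *		       int datasync)
     *	{
     *		struct inode *inode = file->f_mapping->host;
     *		int err, err2;
     *
     *		err = file_write_and_wait_range(file, start, end);
     *		err2 = sync_mapping_buffers(inode->i_mapping);
     *		return err ? err : err2;
     *	}
     */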
 553/*
 554 * Called when we've recently written block `bblock', and it is known that
 555 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 556 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 557 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 558 */
 559void write_boundary_block(struct block_device *bdev,
 560			sector_t bblock, unsigned blocksize)
 561{
 562	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 563	if (bh) {
 564		if (buffer_dirty(bh))
 565			write_dirty_buffer(bh, 0);
 566		put_bh(bh);
 567	}
 568}
 569
 570void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 571{
 572	struct address_space *mapping = inode->i_mapping;
 573	struct address_space *buffer_mapping = bh->b_page->mapping;
 574
 575	mark_buffer_dirty(bh);
 576	if (!mapping->private_data) {
 577		mapping->private_data = buffer_mapping;
 578	} else {
 579		BUG_ON(mapping->private_data != buffer_mapping);
 580	}
 581	if (!bh->b_assoc_map) {
 582		spin_lock(&buffer_mapping->private_lock);
 583		list_move_tail(&bh->b_assoc_buffers,
 584				&mapping->private_list);
 585		bh->b_assoc_map = mapping;
 586		spin_unlock(&buffer_mapping->private_lock);
 587	}
 588}
 589EXPORT_SYMBOL(mark_buffer_dirty_inode);
 590
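    /*
     * Usage sketch (illustrative only): after a filesystem updates an
     * indirect block on behalf of an inode, it associates the buffer with
     * that inode so a later fsync() of the file writes it back too:
     *
     *	...update the indirect block through bh->b_data...
     *	mark_buffer_dirty_inode(bh, inode);
     */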
 591/*
 592 * Add a page to the dirty page list.
 593 *
 594 * It is a sad fact of life that this function is called from several places
 595 * deeply under spinlocking.  It may not sleep.
 596 *
 597 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 598 * dirty-state coherency between the page and the buffers.  If the page does
 599 * not have buffers, then when they are later attached they will all be set
 600 * dirty.
 601 *
 602 * The buffers are dirtied before the page is dirtied.  There's a small race
 603 * window in which a writepage caller may see the page cleanness but not the
 604 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 605 * before the buffers, a concurrent writepage caller could clear the page dirty
 606 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 607 * page on the dirty page list.
 608 *
 609 * We use private_lock to lock against try_to_free_buffers while using the
 610 * page's buffer list.  Also use this to protect against clean buffers being
 611 * added to the page after it was set dirty.
 612 *
 613 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 614 * address_space though.
 615 */
 616bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 617{
 618	struct buffer_head *head;
 619	bool newly_dirty;
 620
 621	spin_lock(&mapping->private_lock);
 622	head = folio_buffers(folio);
 623	if (head) {
 624		struct buffer_head *bh = head;
 625
 626		do {
 627			set_buffer_dirty(bh);
 628			bh = bh->b_this_page;
 629		} while (bh != head);
 630	}
 631	/*
 632	 * Lock out page's memcg migration to keep PageDirty
 633	 * synchronized with per-memcg dirty page counters.
 634	 */
 635	folio_memcg_lock(folio);
 636	newly_dirty = !folio_test_set_dirty(folio);
 637	spin_unlock(&mapping->private_lock);
 638
 639	if (newly_dirty)
 640		__folio_mark_dirty(folio, mapping, 1);
 641
 642	folio_memcg_unlock(folio);
 643
 644	if (newly_dirty)
 645		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 646
 647	return newly_dirty;
 648}
 649EXPORT_SYMBOL(block_dirty_folio);
 650
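    /*
     * Wiring sketch (illustrative only, "myfs" is a made-up name):
     * buffer-backed filesystems typically plug block_dirty_folio()
     * straight into their address_space_operations:
     *
     *	const struct address_space_operations myfs_aops = {
     *		.dirty_folio		= block_dirty_folio,
     *		.invalidate_folio	= block_invalidate_folio,
     *		...
     *	};
     */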
 651/*
 652 * Write out and wait upon a list of buffers.
 653 *
 654 * We have conflicting pressures: we want to make sure that all
 655 * initially dirty buffers get waited on, but that any subsequently
 656 * dirtied buffers don't.  After all, we don't want fsync to last
 657 * forever if somebody is actively writing to the file.
 658 *
 659 * Do this in two main stages: first we copy dirty buffers to a
 660 * temporary inode list, queueing the writes as we go.  Then we clean
 661 * up, waiting for those writes to complete.
 662 * 
 663 * During this second stage, any subsequent updates to the file may end
 664 * up refiling the buffer on the original inode's dirty list again, so
 665 * there is a chance we will end up with a buffer queued for write but
 666 * not yet completed on that list.  So, as a final cleanup we go through
 667 * the osync code to catch these locked, dirty buffers without requeuing
 668 * any newly dirty buffers for write.
 669 */
 670static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 671{
 672	struct buffer_head *bh;
 673	struct list_head tmp;
 674	struct address_space *mapping;
 675	int err = 0, err2;
 676	struct blk_plug plug;
 677
 678	INIT_LIST_HEAD(&tmp);
 679	blk_start_plug(&plug);
 680
 681	spin_lock(lock);
 682	while (!list_empty(list)) {
 683		bh = BH_ENTRY(list->next);
 684		mapping = bh->b_assoc_map;
 685		__remove_assoc_queue(bh);
 686		/* Avoid race with mark_buffer_dirty_inode() which does
 687		 * a lockless check and we rely on seeing the dirty bit */
 688		smp_mb();
 689		if (buffer_dirty(bh) || buffer_locked(bh)) {
 690			list_add(&bh->b_assoc_buffers, &tmp);
 691			bh->b_assoc_map = mapping;
 692			if (buffer_dirty(bh)) {
 693				get_bh(bh);
 694				spin_unlock(lock);
 695				/*
 696				 * Ensure any pending I/O completes so that
 697				 * write_dirty_buffer() actually writes the
 698				 * current contents - it is a noop if I/O is
 699				 * still in flight on potentially older
 700				 * contents.
 701				 */
 702				write_dirty_buffer(bh, REQ_SYNC);
 703
 704				/*
 705				 * Kick off IO for the previous mapping. Note
 706				 * that we will not run the very last mapping,
 707				 * wait_on_buffer() will do that for us
 708				 * through sync_buffer().
 709				 */
 710				brelse(bh);
 711				spin_lock(lock);
 712			}
 713		}
 714	}
 715
 716	spin_unlock(lock);
 717	blk_finish_plug(&plug);
 718	spin_lock(lock);
 719
 720	while (!list_empty(&tmp)) {
 721		bh = BH_ENTRY(tmp.prev);
 722		get_bh(bh);
 723		mapping = bh->b_assoc_map;
 724		__remove_assoc_queue(bh);
 725		/* Avoid race with mark_buffer_dirty_inode() which does
 726		 * a lockless check and we rely on seeing the dirty bit */
 727		smp_mb();
 728		if (buffer_dirty(bh)) {
 729			list_add(&bh->b_assoc_buffers,
 730				 &mapping->private_list);
 731			bh->b_assoc_map = mapping;
 732		}
 733		spin_unlock(lock);
 734		wait_on_buffer(bh);
 735		if (!buffer_uptodate(bh))
 736			err = -EIO;
 737		brelse(bh);
 738		spin_lock(lock);
 739	}
 740	
 741	spin_unlock(lock);
 742	err2 = osync_buffers_list(lock, list);
 743	if (err)
 744		return err;
 745	else
 746		return err2;
 747}
 748
 749/*
 750 * Invalidate any and all dirty buffers on a given inode.  We are
 751 * probably unmounting the fs, but that doesn't mean we have already
 752 * done a sync().  Just drop the buffers from the inode list.
 753 *
 754 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 755 * assumes that all the buffers are against the blockdev.  Not true
 756 * for reiserfs.
 757 */
 758void invalidate_inode_buffers(struct inode *inode)
 759{
 760	if (inode_has_buffers(inode)) {
 761		struct address_space *mapping = &inode->i_data;
 762		struct list_head *list = &mapping->private_list;
 763		struct address_space *buffer_mapping = mapping->private_data;
 764
 765		spin_lock(&buffer_mapping->private_lock);
 766		while (!list_empty(list))
 767			__remove_assoc_queue(BH_ENTRY(list->next));
 768		spin_unlock(&buffer_mapping->private_lock);
 769	}
 770}
 771EXPORT_SYMBOL(invalidate_inode_buffers);
 772
 773/*
 774 * Remove any clean buffers from the inode's buffer list.  This is called
 775 * when we're trying to free the inode itself.  Those buffers can pin it.
 776 *
 777 * Returns true if all buffers were removed.
 778 */
 779int remove_inode_buffers(struct inode *inode)
 780{
 781	int ret = 1;
 782
 783	if (inode_has_buffers(inode)) {
 784		struct address_space *mapping = &inode->i_data;
 785		struct list_head *list = &mapping->private_list;
 786		struct address_space *buffer_mapping = mapping->private_data;
 787
 788		spin_lock(&buffer_mapping->private_lock);
 789		while (!list_empty(list)) {
 790			struct buffer_head *bh = BH_ENTRY(list->next);
 791			if (buffer_dirty(bh)) {
 792				ret = 0;
 793				break;
 794			}
 795			__remove_assoc_queue(bh);
 796		}
 797		spin_unlock(&buffer_mapping->private_lock);
 798	}
 799	return ret;
 800}
 801
 802/*
 803 * Create the appropriate buffers when given a page for the data area and
 804 * the size of each buffer.  Use the bh->b_this_page linked list to
 805 * follow the buffers created.  Return NULL if unable to create more
 806 * buffers.
 807 *
 808 * The retry flag is used to differentiate async IO (paging, swapping),
 809 * which may not fail, from ordinary buffer allocations.
 810 */
 811struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 812		bool retry)
 813{
 814	struct buffer_head *bh, *head;
 815	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
 816	long offset;
 817	struct mem_cgroup *memcg, *old_memcg;
 818
 819	if (retry)
 820		gfp |= __GFP_NOFAIL;
 821
 822	/* The page lock pins the memcg */
 823	memcg = page_memcg(page);
 824	old_memcg = set_active_memcg(memcg);
 825
 826	head = NULL;
 827	offset = PAGE_SIZE;
 828	while ((offset -= size) >= 0) {
 829		bh = alloc_buffer_head(gfp);
 830		if (!bh)
 831			goto no_grow;
 832
 833		bh->b_this_page = head;
 834		bh->b_blocknr = -1;
 835		head = bh;
 836
 837		bh->b_size = size;
 838
 839		/* Link the buffer to its page */
 840		set_bh_page(bh, page, offset);
 841	}
 842out:
 843	set_active_memcg(old_memcg);
 844	return head;
 845/*
 846 * In case anything failed, we just free everything we got.
 847 */
 848no_grow:
 849	if (head) {
 850		do {
 851			bh = head;
 852			head = head->b_this_page;
 853			free_buffer_head(bh);
 854		} while (head);
 855	}
 856
 857	goto out;
 858}
 859EXPORT_SYMBOL_GPL(alloc_page_buffers);
 860
 861static inline void
 862link_dev_buffers(struct page *page, struct buffer_head *head)
 863{
 864	struct buffer_head *bh, *tail;
 865
 866	bh = head;
 867	do {
 868		tail = bh;
 869		bh = bh->b_this_page;
 870	} while (bh);
 871	tail->b_this_page = head;
 872	attach_page_private(page, head);
 873}
 874
 875static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
 876{
 877	sector_t retval = ~((sector_t)0);
 878	loff_t sz = bdev_nr_bytes(bdev);
 879
 880	if (sz) {
 881		unsigned int sizebits = blksize_bits(size);
 882		retval = (sz >> sizebits);
 883	}
 884	return retval;
 885}
 886
 887/*
 888 * Initialise the state of a blockdev page's buffers.
 889 */ 
 890static sector_t
 891init_page_buffers(struct page *page, struct block_device *bdev,
 892			sector_t block, int size)
 893{
 894	struct buffer_head *head = page_buffers(page);
 895	struct buffer_head *bh = head;
 896	int uptodate = PageUptodate(page);
 897	sector_t end_block = blkdev_max_block(bdev, size);
 898
 899	do {
 900		if (!buffer_mapped(bh)) {
 901			bh->b_end_io = NULL;
 902			bh->b_private = NULL;
 903			bh->b_bdev = bdev;
 904			bh->b_blocknr = block;
 905			if (uptodate)
 906				set_buffer_uptodate(bh);
 907			if (block < end_block)
 908				set_buffer_mapped(bh);
 909		}
 910		block++;
 911		bh = bh->b_this_page;
 912	} while (bh != head);
 913
 914	/*
 915	 * Caller needs to validate requested block against end of device.
 916	 */
 917	return end_block;
 918}
 919
 920/*
 921 * Create the page-cache page that contains the requested block.
 922 *
 923 * This is used purely for blockdev mappings.
 924 */
 925static int
 926grow_dev_page(struct block_device *bdev, sector_t block,
 927	      pgoff_t index, int size, int sizebits, gfp_t gfp)
 928{
 929	struct inode *inode = bdev->bd_inode;
 930	struct page *page;
 931	struct buffer_head *bh;
 932	sector_t end_block;
 933	int ret = 0;
 934	gfp_t gfp_mask;
 935
 936	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
 937
 938	/*
 939	 * XXX: __getblk_slow() can not really deal with failure and
 940	 * will endlessly loop on improvised global reclaim.  Prefer
 941	 * looping in the allocator rather than here, at least that
 942	 * code knows what it's doing.
 943	 */
 944	gfp_mask |= __GFP_NOFAIL;
 945
 946	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
 947
 948	BUG_ON(!PageLocked(page));
 949
 950	if (page_has_buffers(page)) {
 951		bh = page_buffers(page);
 952		if (bh->b_size == size) {
 953			end_block = init_page_buffers(page, bdev,
 954						(sector_t)index << sizebits,
 955						size);
 956			goto done;
 957		}
 958		if (!try_to_free_buffers(page_folio(page)))
 959			goto failed;
 960	}
 961
 962	/*
 963	 * Allocate some buffers for this page
 964	 */
 965	bh = alloc_page_buffers(page, size, true);
 966
 967	/*
 968	 * Link the page to the buffers and initialise them.  Take the
 969	 * lock to be atomic wrt __find_get_block(), which does not
 970	 * run under the page lock.
 971	 */
 972	spin_lock(&inode->i_mapping->private_lock);
 973	link_dev_buffers(page, bh);
 974	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
 975			size);
 976	spin_unlock(&inode->i_mapping->private_lock);
 977done:
 978	ret = (block < end_block) ? 1 : -ENXIO;
 979failed:
 980	unlock_page(page);
 981	put_page(page);
 982	return ret;
 983}
 984
 985/*
 986 * Create buffers for the specified block device block's page.  If
 987 * that page was dirty, the buffers are set dirty also.
 988 */
 989static int
 990grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
 991{
 992	pgoff_t index;
 993	int sizebits;
 994
 995	sizebits = PAGE_SHIFT - __ffs(size);
 996	index = block >> sizebits;
 997
 998	/*
 999	 * Check for a block which wants to lie outside our maximum possible
1000	 * pagecache index.  (this comparison is done using sector_t types).
1001	 */
1002	if (unlikely(index != block >> sizebits)) {
1003		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1004			"device %pg\n",
1005			__func__, (unsigned long long)block,
1006			bdev);
1007		return -EIO;
1008	}
1009
1010	/* Create a page with the proper size buffers. */
1011	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1012}
1013
1014static struct buffer_head *
1015__getblk_slow(struct block_device *bdev, sector_t block,
1016	     unsigned size, gfp_t gfp)
1017{
1018	/* Size must be multiple of hard sectorsize */
1019	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1020			(size < 512 || size > PAGE_SIZE))) {
1021		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1022					size);
1023		printk(KERN_ERR "logical block size: %d\n",
1024					bdev_logical_block_size(bdev));
1025
1026		dump_stack();
1027		return NULL;
1028	}
1029
1030	for (;;) {
1031		struct buffer_head *bh;
1032		int ret;
1033
1034		bh = __find_get_block(bdev, block, size);
1035		if (bh)
1036			return bh;
1037
1038		ret = grow_buffers(bdev, block, size, gfp);
1039		if (ret < 0)
1040			return NULL;
1041	}
1042}
1043
1044/*
1045 * The relationship between dirty buffers and dirty pages:
1046 *
1047 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1048 * the page is tagged dirty in the page cache.
1049 *
1050 * At all times, the dirtiness of the buffers represents the dirtiness of
1051 * subsections of the page.  If the page has buffers, the page dirty bit is
1052 * merely a hint about the true dirty state.
1053 *
1054 * When a page is set dirty in its entirety, all its buffers are marked dirty
1055 * (if the page has buffers).
1056 *
1057 * When a buffer is marked dirty, its page is dirtied, but the page's other
1058 * buffers are not.
1059 *
1060 * Also.  When blockdev buffers are explicitly read with bread(), they
1061 * individually become uptodate.  But their backing page remains not
1062 * uptodate - even if all of its buffers are uptodate.  A subsequent
1063 * block_read_full_folio() against that folio will discover all the uptodate
1064 * buffers, will set the folio uptodate and will perform no I/O.
1065 */
1066
1067/**
1068 * mark_buffer_dirty - mark a buffer_head as needing writeout
1069 * @bh: the buffer_head to mark dirty
1070 *
1071 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1072 * its backing page dirty, then tag the page as dirty in the page cache
1073 * and then attach the address_space's inode to its superblock's dirty
1074 * inode list.
1075 *
1076 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1077 * i_pages lock and mapping->host->i_lock.
1078 */
1079void mark_buffer_dirty(struct buffer_head *bh)
1080{
1081	WARN_ON_ONCE(!buffer_uptodate(bh));
1082
1083	trace_block_dirty_buffer(bh);
1084
1085	/*
1086	 * Very *carefully* optimize the it-is-already-dirty case.
1087	 *
1088	 * Don't let the final "is it dirty" escape to before we
1089	 * perhaps modified the buffer.
1090	 */
1091	if (buffer_dirty(bh)) {
1092		smp_mb();
1093		if (buffer_dirty(bh))
1094			return;
1095	}
1096
1097	if (!test_set_buffer_dirty(bh)) {
1098		struct page *page = bh->b_page;
1099		struct address_space *mapping = NULL;
1100
1101		lock_page_memcg(page);
1102		if (!TestSetPageDirty(page)) {
1103			mapping = page_mapping(page);
1104			if (mapping)
1105				__set_page_dirty(page, mapping, 0);
1106		}
1107		unlock_page_memcg(page);
1108		if (mapping)
1109			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1110	}
1111}
1112EXPORT_SYMBOL(mark_buffer_dirty);
1113
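    /*
     * Usage sketch (illustrative only): the classic read-modify-write
     * cycle on a metadata block:
     *
     *	bh = sb_bread(sb, block);
     *	if (!bh)
     *		return -EIO;
     *	...modify bh->b_data...
     *	mark_buffer_dirty(bh);
     *	brelse(bh);
     */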
1114void mark_buffer_write_io_error(struct buffer_head *bh)
1115{
1116	struct super_block *sb;
1117
1118	set_buffer_write_io_error(bh);
1119	/* FIXME: do we need to set this in both places? */
1120	if (bh->b_page && bh->b_page->mapping)
1121		mapping_set_error(bh->b_page->mapping, -EIO);
1122	if (bh->b_assoc_map)
1123		mapping_set_error(bh->b_assoc_map, -EIO);
1124	rcu_read_lock();
1125	sb = READ_ONCE(bh->b_bdev->bd_super);
1126	if (sb)
1127		errseq_set(&sb->s_wb_err, -EIO);
1128	rcu_read_unlock();
1129}
1130EXPORT_SYMBOL(mark_buffer_write_io_error);
1131
1132/*
1133 * Decrement a buffer_head's reference count.  If all buffers against a page
1134 * have zero reference count, are clean and unlocked, and if the page is clean
1135 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1136 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1137 * a page but it ends up not being freed, and buffers may later be reattached).
1138 */
1139void __brelse(struct buffer_head * buf)
1140{
1141	if (atomic_read(&buf->b_count)) {
1142		put_bh(buf);
1143		return;
1144	}
1145	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1146}
1147EXPORT_SYMBOL(__brelse);
1148
1149/*
1150 * bforget() is like brelse(), except it discards any
1151 * potentially dirty data.
1152 */
1153void __bforget(struct buffer_head *bh)
1154{
1155	clear_buffer_dirty(bh);
1156	if (bh->b_assoc_map) {
1157		struct address_space *buffer_mapping = bh->b_page->mapping;
1158
1159		spin_lock(&buffer_mapping->private_lock);
1160		list_del_init(&bh->b_assoc_buffers);
1161		bh->b_assoc_map = NULL;
1162		spin_unlock(&buffer_mapping->private_lock);
1163	}
1164	__brelse(bh);
1165}
1166EXPORT_SYMBOL(__bforget);
1167
1168static struct buffer_head *__bread_slow(struct buffer_head *bh)
1169{
1170	lock_buffer(bh);
1171	if (buffer_uptodate(bh)) {
1172		unlock_buffer(bh);
1173		return bh;
1174	} else {
1175		get_bh(bh);
1176		bh->b_end_io = end_buffer_read_sync;
1177		submit_bh(REQ_OP_READ, bh);
1178		wait_on_buffer(bh);
1179		if (buffer_uptodate(bh))
1180			return bh;
1181	}
1182	brelse(bh);
1183	return NULL;
1184}
1185
1186/*
1187 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1188 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1189 * refcount elevated by one when they're in an LRU.  A buffer can only appear
1190 * once in a particular CPU's LRU.  A single buffer can be present in multiple
1191 * CPU's LRUs at the same time.
1192 *
1193 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1194 * sb_find_get_block().
1195 *
1196 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1197 * a local interrupt disable for that.
1198 */
1199
1200#define BH_LRU_SIZE	16
1201
1202struct bh_lru {
1203	struct buffer_head *bhs[BH_LRU_SIZE];
1204};
1205
1206static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1207
1208#ifdef CONFIG_SMP
1209#define bh_lru_lock()	local_irq_disable()
1210#define bh_lru_unlock()	local_irq_enable()
1211#else
1212#define bh_lru_lock()	preempt_disable()
1213#define bh_lru_unlock()	preempt_enable()
1214#endif
1215
1216static inline void check_irqs_on(void)
1217{
1218#ifdef irqs_disabled
1219	BUG_ON(irqs_disabled());
1220#endif
1221}
1222
1223/*
1224 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1225 * inserted at the front, and the buffer_head at the back, if any, is evicted.
1226 * If it is already in the LRU, it is moved to the front.
1227 */
1228static void bh_lru_install(struct buffer_head *bh)
1229{
1230	struct buffer_head *evictee = bh;
1231	struct bh_lru *b;
1232	int i;
1233
1234	check_irqs_on();
1235	bh_lru_lock();
1236
1237	/*
1238	 * the refcount of buffer_head in bh_lru prevents dropping the
1239	 * attached page (i.e., try_to_free_buffers()) and so can cause
1240	 * page migration to fail.
1241	 * Skip putting upcoming bh into bh_lru until migration is done.
1242	 */
1243	if (lru_cache_disabled()) {
1244		bh_lru_unlock();
1245		return;
1246	}
1247
1248	b = this_cpu_ptr(&bh_lrus);
1249	for (i = 0; i < BH_LRU_SIZE; i++) {
1250		swap(evictee, b->bhs[i]);
1251		if (evictee == bh) {
1252			bh_lru_unlock();
1253			return;
1254		}
1255	}
1256
1257	get_bh(bh);
1258	bh_lru_unlock();
1259	brelse(evictee);
1260}
1261
1262/*
1263 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1264 */
1265static struct buffer_head *
1266lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1267{
1268	struct buffer_head *ret = NULL;
1269	unsigned int i;
1270
1271	check_irqs_on();
1272	bh_lru_lock();
1273	for (i = 0; i < BH_LRU_SIZE; i++) {
1274		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1275
1276		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1277		    bh->b_size == size) {
1278			if (i) {
1279				while (i) {
1280					__this_cpu_write(bh_lrus.bhs[i],
1281						__this_cpu_read(bh_lrus.bhs[i - 1]));
1282					i--;
1283				}
1284				__this_cpu_write(bh_lrus.bhs[0], bh);
1285			}
1286			get_bh(bh);
1287			ret = bh;
1288			break;
1289		}
1290	}
1291	bh_lru_unlock();
1292	return ret;
1293}
1294
1295/*
1296 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1297 * it in the LRU and mark it as accessed.  If it is not present then return
1298 * NULL.
1299 */
1300struct buffer_head *
1301__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1302{
1303	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1304
1305	if (bh == NULL) {
1306		/* __find_get_block_slow will mark the page accessed */
1307		bh = __find_get_block_slow(bdev, block);
1308		if (bh)
1309			bh_lru_install(bh);
1310	} else
1311		touch_buffer(bh);
1312
1313	return bh;
1314}
1315EXPORT_SYMBOL(__find_get_block);
1316
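    /*
     * Usage sketch (illustrative only): look up a block without
     * triggering any I/O or allocation; this is all that
     * sb_find_get_block() does:
     *
     *	bh = __find_get_block(sb->s_bdev, block, sb->s_blocksize);
     *	if (bh) {
     *		...inspect the cached buffer...
     *		brelse(bh);
     *	}
     */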
1317/*
1318 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1319 * which corresponds to the passed block_device, block and size. The
1320 * returned buffer has its reference count incremented.
1321 *
1322 * __getblk_gfp() will lock up the machine if grow_dev_page's
1323 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
1324 */
1325struct buffer_head *
1326__getblk_gfp(struct block_device *bdev, sector_t block,
1327	     unsigned size, gfp_t gfp)
1328{
1329	struct buffer_head *bh = __find_get_block(bdev, block, size);
1330
1331	might_sleep();
1332	if (bh == NULL)
1333		bh = __getblk_slow(bdev, block, size, gfp);
1334	return bh;
1335}
1336EXPORT_SYMBOL(__getblk_gfp);
1337
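    /*
     * Usage sketch (illustrative only): when a block will be completely
     * overwritten there is no need to read it first; __getblk() returns
     * a possibly non-uptodate buffer:
     *
     *	bh = __getblk(bdev, block, size);
     *	lock_buffer(bh);
     *	memset(bh->b_data, 0, bh->b_size);
     *	set_buffer_uptodate(bh);
     *	mark_buffer_dirty(bh);
     *	unlock_buffer(bh);
     *	brelse(bh);
     */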
1338/*
1339 * Do async read-ahead on a buffer..
1340 */
1341void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1342{
1343	struct buffer_head *bh = __getblk(bdev, block, size);
1344	if (likely(bh)) {
1345		bh_readahead(bh, REQ_RAHEAD);
1346		brelse(bh);
1347	}
1348}
1349EXPORT_SYMBOL(__breadahead);
1350
1351/**
1352 *  __bread_gfp() - reads a specified block and returns the bh
1353 *  @bdev: the block_device to read from
1354 *  @block: number of block
1355 *  @size: size (in bytes) to read
1356 *  @gfp: page allocation flag
1357 *
1358 *  Reads a specified block, and returns the buffer head that contains it.
1359 *  If you set @gfp to zero, the page cache will be allocated from the
1360 *  non-movable area so that it does not interfere with page migration.
1361 *  It returns NULL if the block was unreadable.
1362 */
1363struct buffer_head *
1364__bread_gfp(struct block_device *bdev, sector_t block,
1365		   unsigned size, gfp_t gfp)
1366{
1367	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1368
1369	if (likely(bh) && !buffer_uptodate(bh))
1370		bh = __bread_slow(bh);
1371	return bh;
1372}
1373EXPORT_SYMBOL(__bread_gfp);
1374
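    /*
     * Usage sketch (illustrative only): __bread() is the common wrapper
     * around __bread_gfp() and returns an uptodate buffer or NULL:
     *
     *	bh = __bread(bdev, block, 512);
     *	if (!bh)
     *		return -EIO;
     *	memcpy(out, bh->b_data, 512);
     *	brelse(bh);
     */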
1375static void __invalidate_bh_lrus(struct bh_lru *b)
1376{
1377	int i;
1378
1379	for (i = 0; i < BH_LRU_SIZE; i++) {
1380		brelse(b->bhs[i]);
1381		b->bhs[i] = NULL;
1382	}
1383}
1384/*
1385 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1386 * This doesn't race because it runs in each cpu either in irq
1387 * or with preempt disabled.
1388 */
1389static void invalidate_bh_lru(void *arg)
1390{
1391	struct bh_lru *b = &get_cpu_var(bh_lrus);
1392
1393	__invalidate_bh_lrus(b);
1394	put_cpu_var(bh_lrus);
1395}
1396
1397bool has_bh_in_lru(int cpu, void *dummy)
1398{
1399	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1400	int i;
1401	
1402	for (i = 0; i < BH_LRU_SIZE; i++) {
1403		if (b->bhs[i])
1404			return true;
1405	}
1406
1407	return false;
1408}
1409
1410void invalidate_bh_lrus(void)
1411{
1412	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1413}
1414EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1415
1416/*
1417 * It's called from workqueue context so we need a bh_lru_lock to close
1418 * the race with preemption/irq.
1419 */
1420void invalidate_bh_lrus_cpu(void)
1421{
1422	struct bh_lru *b;
1423
1424	bh_lru_lock();
1425	b = this_cpu_ptr(&bh_lrus);
1426	__invalidate_bh_lrus(b);
1427	bh_lru_unlock();
1428}
1429
1430void set_bh_page(struct buffer_head *bh,
1431		struct page *page, unsigned long offset)
1432{
1433	bh->b_page = page;
1434	BUG_ON(offset >= PAGE_SIZE);
1435	if (PageHighMem(page))
1436		/*
1437		 * This catches illegal uses and preserves the offset:
1438		 */
1439		bh->b_data = (char *)(0 + offset);
1440	else
1441		bh->b_data = page_address(page) + offset;
1442}
1443EXPORT_SYMBOL(set_bh_page);
1444
1445/*
1446 * Called when truncating a buffer on a page completely.
1447 */
1448
1449/* Bits that are cleared during an invalidate */
1450#define BUFFER_FLAGS_DISCARD \
1451	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1452	 1 << BH_Delay | 1 << BH_Unwritten)
1453
1454static void discard_buffer(struct buffer_head * bh)
1455{
1456	unsigned long b_state;
1457
1458	lock_buffer(bh);
1459	clear_buffer_dirty(bh);
1460	bh->b_bdev = NULL;
1461	b_state = READ_ONCE(bh->b_state);
1462	do {
1463	} while (!try_cmpxchg(&bh->b_state, &b_state,
1464			      b_state & ~BUFFER_FLAGS_DISCARD));
1465	unlock_buffer(bh);
1466}
1467
1468/**
1469 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1470 * @folio: The folio which is affected.
1471 * @offset: start of the range to invalidate
1472 * @length: length of the range to invalidate
1473 *
1474 * block_invalidate_folio() is called when all or part of the folio has been
1475 * invalidated by a truncate operation.
1476 *
1477 * block_invalidate_folio() does not have to release all buffers, but it must
1478 * ensure that no dirty buffer is left outside @offset and that no I/O
1479 * is underway against any of the blocks which are outside the truncation
1480 * point.  Because the caller is about to free (and possibly reuse) those
1481 * blocks on-disk.
1482 */
1483void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1484{
1485	struct buffer_head *head, *bh, *next;
1486	size_t curr_off = 0;
1487	size_t stop = length + offset;
1488
1489	BUG_ON(!folio_test_locked(folio));
1490
1491	/*
1492	 * Check for overflow
1493	 */
1494	BUG_ON(stop > folio_size(folio) || stop < length);
1495
1496	head = folio_buffers(folio);
1497	if (!head)
1498		return;
1499
1500	bh = head;
1501	do {
1502		size_t next_off = curr_off + bh->b_size;
1503		next = bh->b_this_page;
1504
1505		/*
1506		 * Are we still fully in range?
1507		 */
1508		if (next_off > stop)
1509			goto out;
1510
1511		/*
1512		 * is this block fully invalidated?
1513		 */
1514		if (offset <= curr_off)
1515			discard_buffer(bh);
1516		curr_off = next_off;
1517		bh = next;
1518	} while (bh != head);
1519
1520	/*
1521	 * We release buffers only if the entire folio is being invalidated.
1522	 * The get_block cached value has been unconditionally invalidated,
1523	 * so real IO is not possible anymore.
1524	 */
1525	if (length == folio_size(folio))
1526		filemap_release_folio(folio, 0);
1527out:
1528	return;
1529}
1530EXPORT_SYMBOL(block_invalidate_folio);
1531
1532
1533/*
1534 * We attach and possibly dirty the buffers atomically wrt
1535 * block_dirty_folio() via private_lock.  try_to_free_buffers
1536 * is already excluded via the page lock.
1537 */
1538void create_empty_buffers(struct page *page,
1539			unsigned long blocksize, unsigned long b_state)
1540{
1541	struct buffer_head *bh, *head, *tail;
1542
1543	head = alloc_page_buffers(page, blocksize, true);
1544	bh = head;
1545	do {
1546		bh->b_state |= b_state;
1547		tail = bh;
1548		bh = bh->b_this_page;
1549	} while (bh);
1550	tail->b_this_page = head;
1551
1552	spin_lock(&page->mapping->private_lock);
1553	if (PageUptodate(page) || PageDirty(page)) {
1554		bh = head;
1555		do {
1556			if (PageDirty(page))
1557				set_buffer_dirty(bh);
1558			if (PageUptodate(page))
1559				set_buffer_uptodate(bh);
1560			bh = bh->b_this_page;
1561		} while (bh != head);
1562	}
1563	attach_page_private(page, head);
1564	spin_unlock(&page->mapping->private_lock);
1565}
1566EXPORT_SYMBOL(create_empty_buffers);
1567
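    /*
     * Usage sketch (illustrative only): callers attach buffers lazily,
     * much as create_page_buffers() does later in this file:
     *
     *	if (!page_has_buffers(page))
     *		create_empty_buffers(page, i_blocksize(inode), 0);
     *	head = page_buffers(page);
     */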
1568/**
1569 * clean_bdev_aliases: clean a range of buffers in block device
1570 * @bdev: Block device to clean buffers in
1571 * @block: Start of a range of blocks to clean
1572 * @len: Number of blocks to clean
1573 *
1574 * We are taking a range of blocks for data and we don't want writeback of any
1575 * buffer-cache aliases starting from the return of this function until the
1576 * moment when something explicitly marks the buffer dirty (hopefully that
1577 * will not happen until we free that block ;-) We don't even need to mark
1578 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1579 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1580 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1581 * would confuse anyone who might pick it with bread() afterwards...
1582 *
1583 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
1584 * writeout I/O going on against recently-freed buffers.  We don't wait on that
1585 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1586 * need to.  That happens here.
1587 */
1588void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1589{
1590	struct inode *bd_inode = bdev->bd_inode;
1591	struct address_space *bd_mapping = bd_inode->i_mapping;
1592	struct folio_batch fbatch;
1593	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
1594	pgoff_t end;
1595	int i, count;
1596	struct buffer_head *bh;
1597	struct buffer_head *head;
1598
1599	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
1600	folio_batch_init(&fbatch);
1601	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1602		count = folio_batch_count(&fbatch);
1603		for (i = 0; i < count; i++) {
1604			struct folio *folio = fbatch.folios[i];
1605
1606			if (!folio_buffers(folio))
1607				continue;
1608			/*
1609			 * We use folio lock instead of bd_mapping->private_lock
1610			 * to pin buffers here since we can afford to sleep and
1611			 * it scales better than a global spinlock.
1612			 */
1613			folio_lock(folio);
1614			/* Recheck when the folio is locked which pins bhs */
1615			head = folio_buffers(folio);
1616			if (!head)
1617				goto unlock_page;
1618			bh = head;
1619			do {
1620				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1621					goto next;
1622				if (bh->b_blocknr >= block + len)
1623					break;
1624				clear_buffer_dirty(bh);
1625				wait_on_buffer(bh);
1626				clear_buffer_req(bh);
1627next:
1628				bh = bh->b_this_page;
1629			} while (bh != head);
1630unlock_page:
1631			folio_unlock(folio);
1632		}
1633		folio_batch_release(&fbatch);
1634		cond_resched();
1635		/* End of range already reached? */
1636		if (index > end || !index)
1637			break;
1638	}
1639}
1640EXPORT_SYMBOL(clean_bdev_aliases);
1641
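    /*
     * Usage sketch (illustrative only): the common single-block case goes
     * through the clean_bdev_bh_alias() wrapper when a freshly allocated
     * block is about to be written:
     *
     *	if (buffer_new(bh))
     *		clean_bdev_bh_alias(bh);
     */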
1642/*
1643 * Size is a power-of-two in the range 512..PAGE_SIZE,
1644 * and the case we care about most is PAGE_SIZE.
1645 *
1646 * So this *could* possibly be written with those
1647 * constraints in mind (relevant mostly if some
1648 * architecture has a slow bit-scan instruction)
1649 */
1650static inline int block_size_bits(unsigned int blocksize)
1651{
1652	return ilog2(blocksize);
1653}
1654
1655static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1656{
1657	BUG_ON(!PageLocked(page));
1658
1659	if (!page_has_buffers(page))
1660		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
1661				     b_state);
1662	return page_buffers(page);
1663}
1664
1665/*
1666 * NOTE! All mapped/uptodate combinations are valid:
1667 *
1668 *	Mapped	Uptodate	Meaning
1669 *
1670 *	No	No		"unknown" - must do get_block()
1671 *	No	Yes		"hole" - zero-filled
1672 *	Yes	No		"allocated" - allocated on disk, not read in
1673 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1674 *
1675 * "Dirty" is valid only with the last case (mapped+uptodate).
1676 */
1677
1678/*
1679 * While block_write_full_page is writing back the dirty buffers under
1680 * the page lock, whoever dirtied the buffers may decide to clean them
1681 * again at any time.  We handle that by only looking at the buffer
1682 * state inside lock_buffer().
1683 *
1684 * If block_write_full_page() is called for regular writeback
1685 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1686 * locked buffer.  This can only happen if someone has written the buffer
1687 * directly, with submit_bh().  At the address_space level PageWriteback
1688 * prevents this contention from occurring.
1689 *
1690 * If block_write_full_page() is called with wbc->sync_mode ==
1691 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1692 * causes the writes to be flagged as synchronous writes.
1693 */
1694int __block_write_full_page(struct inode *inode, struct page *page,
1695			get_block_t *get_block, struct writeback_control *wbc,
1696			bh_end_io_t *handler)
1697{
1698	int err;
1699	sector_t block;
1700	sector_t last_block;
1701	struct buffer_head *bh, *head;
1702	unsigned int blocksize, bbits;
1703	int nr_underway = 0;
1704	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1705
1706	head = create_page_buffers(page, inode,
1707					(1 << BH_Dirty)|(1 << BH_Uptodate));
1708
1709	/*
1710	 * Be very careful.  We have no exclusion from block_dirty_folio
1711	 * here, and the (potentially unmapped) buffers may become dirty at
1712	 * any time.  If a buffer becomes dirty here after we've inspected it
1713	 * then we just miss that fact, and the page stays dirty.
1714	 *
1715	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1716	 * handle that here by just cleaning them.
1717	 */
1718
1719	bh = head;
1720	blocksize = bh->b_size;
1721	bbits = block_size_bits(blocksize);
1722
1723	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1724	last_block = (i_size_read(inode) - 1) >> bbits;
1725
1726	/*
1727	 * Get all the dirty buffers mapped to disk addresses and
1728	 * handle any aliases from the underlying blockdev's mapping.
1729	 */
1730	do {
1731		if (block > last_block) {
1732			/*
1733			 * mapped buffers outside i_size will occur, because
1734			 * this page can be outside i_size when there is a
1735			 * truncate in progress.
1736			 */
1737			/*
1738			 * The buffer was zeroed by block_write_full_page()
1739			 */
1740			clear_buffer_dirty(bh);
1741			set_buffer_uptodate(bh);
1742		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1743			   buffer_dirty(bh)) {
1744			WARN_ON(bh->b_size != blocksize);
1745			err = get_block(inode, block, bh, 1);
1746			if (err)
1747				goto recover;
1748			clear_buffer_delay(bh);
1749			if (buffer_new(bh)) {
1750				/* blockdev mappings never come here */
1751				clear_buffer_new(bh);
1752				clean_bdev_bh_alias(bh);
1753			}
1754		}
1755		bh = bh->b_this_page;
1756		block++;
1757	} while (bh != head);
1758
1759	do {
1760		if (!buffer_mapped(bh))
1761			continue;
1762		/*
1763		 * If it's a fully non-blocking write attempt and we cannot
1764		 * lock the buffer then redirty the page.  Note that this can
1765		 * potentially cause a busy-wait loop from writeback threads
1766		 * and kswapd activity, but those code paths have their own
1767		 * higher-level throttling.
1768		 */
1769		if (wbc->sync_mode != WB_SYNC_NONE) {
1770			lock_buffer(bh);
1771		} else if (!trylock_buffer(bh)) {
1772			redirty_page_for_writepage(wbc, page);
1773			continue;
1774		}
1775		if (test_clear_buffer_dirty(bh)) {
1776			mark_buffer_async_write_endio(bh, handler);
1777		} else {
1778			unlock_buffer(bh);
1779		}
1780	} while ((bh = bh->b_this_page) != head);
1781
1782	/*
1783	 * The page and its buffers are protected by PageWriteback(), so we can
1784	 * drop the bh refcounts early.
1785	 */
1786	BUG_ON(PageWriteback(page));
1787	set_page_writeback(page);
1788
1789	do {
1790		struct buffer_head *next = bh->b_this_page;
1791		if (buffer_async_write(bh)) {
1792			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, inode->i_write_hint, wbc);
1793			nr_underway++;
1794		}
1795		bh = next;
1796	} while (bh != head);
1797	unlock_page(page);
1798
1799	err = 0;
1800done:
1801	if (nr_underway == 0) {
1802		/*
1803		 * The page was marked dirty, but the buffers were
1804		 * clean.  Someone wrote them back by hand with
1805		 * write_dirty_buffer/submit_bh.  A rare case.
1806		 */
1807		end_page_writeback(page);
1808
1809		/*
1810		 * The page and buffer_heads can be released at any time from
1811		 * here on.
1812		 */
1813	}
1814	return err;
1815
1816recover:
1817	/*
1818	 * ENOSPC, or some other error.  We may already have added some
1819	 * blocks to the file, so we need to write these out to avoid
1820	 * exposing stale data.
1821	 * The page is currently locked and not marked for writeback
1822	 */
1823	bh = head;
1824	/* Recovery: lock and submit the mapped buffers */
1825	do {
1826		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1827		    !buffer_delay(bh)) {
1828			lock_buffer(bh);
1829			mark_buffer_async_write_endio(bh, handler);
1830		} else {
1831			/*
1832			 * The buffer may have been set dirty during
1833			 * attachment to a dirty page.
1834			 */
1835			clear_buffer_dirty(bh);
1836		}
1837	} while ((bh = bh->b_this_page) != head);
1838	SetPageError(page);
1839	BUG_ON(PageWriteback(page));
1840	mapping_set_error(page->mapping, err);
1841	set_page_writeback(page);
1842	do {
1843		struct buffer_head *next = bh->b_this_page;
1844		if (buffer_async_write(bh)) {
1845			clear_buffer_dirty(bh);
1846			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, inode->i_write_hint, wbc);
1847			nr_underway++;
1848		}
1849		bh = next;
1850	} while (bh != head);
1851	unlock_page(page);
1852	goto done;
1853}
1854EXPORT_SYMBOL(__block_write_full_page);
1855
1856/*
1857 * If a page has any new buffers, zero them out here, and mark them uptodate
1858 * and dirty so they'll be written out (in order to prevent uninitialised
1859 * block data from leaking). And clear the new bit.
1860 */
1861void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1862{
1863	unsigned int block_start, block_end;
1864	struct buffer_head *head, *bh;
1865
1866	BUG_ON(!PageLocked(page));
1867	if (!page_has_buffers(page))
1868		return;
1869
1870	bh = head = page_buffers(page);
1871	block_start = 0;
1872	do {
1873		block_end = block_start + bh->b_size;
1874
1875		if (buffer_new(bh)) {
1876			if (block_end > from && block_start < to) {
1877				if (!PageUptodate(page)) {
1878					unsigned start, size;
1879
1880					start = max(from, block_start);
1881					size = min(to, block_end) - start;
1882
1883					zero_user(page, start, size);
1884					set_buffer_uptodate(bh);
1885				}
1886
1887				clear_buffer_new(bh);
1888				mark_buffer_dirty(bh);
1889			}
1890		}
1891
1892		block_start = block_end;
1893		bh = bh->b_this_page;
1894	} while (bh != head);
1895}
1896EXPORT_SYMBOL(page_zero_new_buffers);
1897
1898static void
1899iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1900		const struct iomap *iomap)
1901{
1902	loff_t offset = block << inode->i_blkbits;
1903
1904	bh->b_bdev = iomap->bdev;
1905
1906	/*
1907	 * Block points to offset in file we need to map, iomap contains
1908	 * the offset at which the map starts. If the map ends before the
1909	 * current block, then do not map the buffer and let the caller
1910	 * handle it.
1911	 */
1912	BUG_ON(offset >= iomap->offset + iomap->length);
1913
1914	switch (iomap->type) {
1915	case IOMAP_HOLE:
1916		/*
1917		 * If the buffer is not up to date or beyond the current EOF,
1918		 * we need to mark it as new to ensure sub-block zeroing is
1919		 * executed if necessary.
1920		 */
1921		if (!buffer_uptodate(bh) ||
1922		    (offset >= i_size_read(inode)))
1923			set_buffer_new(bh);
1924		break;
1925	case IOMAP_DELALLOC:
1926		if (!buffer_uptodate(bh) ||
1927		    (offset >= i_size_read(inode)))
1928			set_buffer_new(bh);
1929		set_buffer_uptodate(bh);
1930		set_buffer_mapped(bh);
1931		set_buffer_delay(bh);
1932		break;
1933	case IOMAP_UNWRITTEN:
1934		/*
1935		 * For unwritten regions, we always need to ensure that regions
1936		 * in the block we are not writing to are zeroed. Mark the
1937		 * buffer as new to ensure this.
1938		 */
1939		set_buffer_new(bh);
1940		set_buffer_unwritten(bh);
1941		fallthrough;
1942	case IOMAP_MAPPED:
1943		if ((iomap->flags & IOMAP_F_NEW) ||
1944		    offset >= i_size_read(inode))
1945			set_buffer_new(bh);
1946		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
1947				inode->i_blkbits;
1948		set_buffer_mapped(bh);
1949		break;
1950	}
1951}
1952
1953int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
1954		get_block_t *get_block, const struct iomap *iomap)
1955{
1956	unsigned from = pos & (PAGE_SIZE - 1);
1957	unsigned to = from + len;
1958	struct inode *inode = folio->mapping->host;
1959	unsigned block_start, block_end;
1960	sector_t block;
1961	int err = 0;
1962	unsigned blocksize, bbits;
1963	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1964
1965	BUG_ON(!folio_test_locked(folio));
1966	BUG_ON(from > PAGE_SIZE);
1967	BUG_ON(to > PAGE_SIZE);
1968	BUG_ON(from > to);
1969
1970	head = create_page_buffers(&folio->page, inode, 0);
1971	blocksize = head->b_size;
1972	bbits = block_size_bits(blocksize);
1973
1974	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1975
1976	for(bh = head, block_start = 0; bh != head || !block_start;
1977	    block++, block_start=block_end, bh = bh->b_this_page) {
1978		block_end = block_start + blocksize;
1979		if (block_end <= from || block_start >= to) {
1980			if (folio_test_uptodate(folio)) {
1981				if (!buffer_uptodate(bh))
1982					set_buffer_uptodate(bh);
1983			}
1984			continue;
1985		}
1986		if (buffer_new(bh))
1987			clear_buffer_new(bh);
1988		if (!buffer_mapped(bh)) {
1989			WARN_ON(bh->b_size != blocksize);
1990			if (get_block) {
1991				err = get_block(inode, block, bh, 1);
1992				if (err)
1993					break;
1994			} else {
1995				iomap_to_bh(inode, block, bh, iomap);
1996			}
1997
1998			if (buffer_new(bh)) {
1999				clean_bdev_bh_alias(bh);
2000				if (folio_test_uptodate(folio)) {
2001					clear_buffer_new(bh);
2002					set_buffer_uptodate(bh);
2003					mark_buffer_dirty(bh);
2004					continue;
2005				}
2006				if (block_end > to || block_start < from)
2007					folio_zero_segments(folio,
2008						to, block_end,
2009						block_start, from);
2010				continue;
2011			}
2012		}
2013		if (folio_test_uptodate(folio)) {
2014			if (!buffer_uptodate(bh))
2015				set_buffer_uptodate(bh);
2016			continue; 
2017		}
2018		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2019		    !buffer_unwritten(bh) &&
2020		     (block_start < from || block_end > to)) {
2021			bh_read_nowait(bh, 0);
2022			*wait_bh++=bh;
2023		}
2024	}
2025	/*
2026	 * If we issued read requests - let them complete.
2027	 */
2028	while(wait_bh > wait) {
2029		wait_on_buffer(*--wait_bh);
2030		if (!buffer_uptodate(*wait_bh))
2031			err = -EIO;
2032	}
2033	if (unlikely(err))
2034		page_zero_new_buffers(&folio->page, from, to);
2035	return err;
2036}
2037
2038int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2039		get_block_t *get_block)
2040{
2041	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2042				       NULL);
2043}
2044EXPORT_SYMBOL(__block_write_begin);
2045
2046static int __block_commit_write(struct inode *inode, struct page *page,
2047		unsigned from, unsigned to)
2048{
2049	unsigned block_start, block_end;
2050	int partial = 0;
2051	unsigned blocksize;
2052	struct buffer_head *bh, *head;
2053
2054	bh = head = page_buffers(page);
2055	blocksize = bh->b_size;
2056
2057	block_start = 0;
2058	do {
2059		block_end = block_start + blocksize;
2060		if (block_end <= from || block_start >= to) {
2061			if (!buffer_uptodate(bh))
2062				partial = 1;
2063		} else {
2064			set_buffer_uptodate(bh);
2065			mark_buffer_dirty(bh);
2066		}
2067		if (buffer_new(bh))
2068			clear_buffer_new(bh);
2069
2070		block_start = block_end;
2071		bh = bh->b_this_page;
2072	} while (bh != head);
2073
2074	/*
2075	 * If this is a partial write which happened to make all buffers
2076	 * uptodate then we can optimize away a bogus read_folio() for
2077	 * the next read(). Here we 'discover' whether the page went
2078	 * uptodate as a result of this (potentially partial) write.
2079	 */
2080	if (!partial)
2081		SetPageUptodate(page);
2082	return 0;
2083}
2084
2085/*
2086 * block_write_begin takes care of the basic task of block allocation and
2087 * bringing partial write blocks uptodate first.
2088 *
2089 * The filesystem needs to handle block truncation upon failure.
2090 */
2091int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2092		struct page **pagep, get_block_t *get_block)
2093{
2094	pgoff_t index = pos >> PAGE_SHIFT;
2095	struct page *page;
2096	int status;
2097
2098	page = grab_cache_page_write_begin(mapping, index);
2099	if (!page)
2100		return -ENOMEM;
2101
2102	status = __block_write_begin(page, pos, len, get_block);
2103	if (unlikely(status)) {
2104		unlock_page(page);
2105		put_page(page);
2106		page = NULL;
2107	}
2108
2109	*pagep = page;
2110	return status;
2111}
2112EXPORT_SYMBOL(block_write_begin);
2113
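    /*
     * Example (an illustrative sketch only): a minimal ->write_begin()
     * built on block_write_begin(); myfs_get_block is a made-up
     * get_block_t, and on failure the filesystem must still truncate
     * blocks allocated beyond i_size:
     *
     *	static int myfs_write_begin(struct file *file,
     *			struct address_space *mapping, loff_t pos,
     *			unsigned len, struct page **pagep, void **fsdata)
     *	{
     *		return block_write_begin(mapping, pos, len, pagep,
     *					 myfs_get_block);
     *	}
     */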
2114int block_write_end(struct file *file, struct address_space *mapping,
2115			loff_t pos, unsigned len, unsigned copied,
2116			struct page *page, void *fsdata)
2117{
2118	struct inode *inode = mapping->host;
2119	unsigned start;
2120
2121	start = pos & (PAGE_SIZE - 1);
2122
2123	if (unlikely(copied < len)) {
2124		/*
2125		 * The buffers that were written will now be uptodate, so
2126		 * we don't have to worry about a read_folio reading them
2127		 * and overwriting a partial write. However if we have
2128		 * encountered a short write and only partially written
2129		 * into a buffer, it will not be marked uptodate, so a
2130		 * read_folio might come in and destroy our partial write.
2131		 *
2132		 * Do the simplest thing, and just treat any short write to a
2133		 * non uptodate page as a zero-length write, and force the
2134		 * caller to redo the whole thing.
2135		 */
2136		if (!PageUptodate(page))
2137			copied = 0;
2138
2139		page_zero_new_buffers(page, start+copied, start+len);
2140	}
2141	flush_dcache_page(page);
2142
2143	/* This could be a short (even 0-length) commit */
2144	__block_commit_write(inode, page, start, start+copied);
2145
2146	return copied;
2147}
2148EXPORT_SYMBOL(block_write_end);
2149
2150int generic_write_end(struct file *file, struct address_space *mapping,
2151			loff_t pos, unsigned len, unsigned copied,
2152			struct page *page, void *fsdata)
2153{
2154	struct inode *inode = mapping->host;
2155	loff_t old_size = inode->i_size;
2156	bool i_size_changed = false;
2157
2158	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2159
2160	/*
2161	 * No need to use i_size_read() here, the i_size cannot change under us
2162	 * because we hold i_rwsem.
2163	 *
2164	 * But it's important to update i_size while still holding page lock:
2165	 * page writeout could otherwise come in and zero beyond i_size.
2166	 */
2167	if (pos + copied > inode->i_size) {
2168		i_size_write(inode, pos + copied);
2169		i_size_changed = true;
2170	}
2171
2172	unlock_page(page);
2173	put_page(page);
2174
2175	if (old_size < pos)
2176		pagecache_isize_extended(inode, old_size, pos);
2177	/*
2178	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2179	 * makes the holding time of page lock longer. Second, it forces lock
2180	 * ordering of page lock and transaction start for journaling
2181	 * filesystems.
2182	 */
2183	if (i_size_changed)
2184		mark_inode_dirty(inode);
2185	return copied;
2186}
2187EXPORT_SYMBOL(generic_write_end);
2188
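    /*
     * Wiring sketch (illustrative only, myfs_write_begin is the made-up
     * helper sketched above): filesystems that use block_write_begin()
     * typically pair it with generic_write_end() in their
     * address_space_operations:
     *
     *	.write_begin	= myfs_write_begin,
     *	.write_end	= generic_write_end,
     */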
2189/*
2190 * block_is_partially_uptodate checks whether buffers within a folio are
2191 * uptodate or not.
2192 *
2193 * Returns true if all buffers which correspond to the specified part
2194 * of the folio are uptodate.
2195 */
2196bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2197{
2198	unsigned block_start, block_end, blocksize;
2199	unsigned to;
2200	struct buffer_head *bh, *head;
2201	bool ret = true;
2202
2203	head = folio_buffers(folio);
2204	if (!head)
2205		return false;
2206	blocksize = head->b_size;
2207	to = min_t(unsigned, folio_size(folio) - from, count);
2208	to = from + to;
2209	if (from < blocksize && to > folio_size(folio) - blocksize)
2210		return false;
2211
2212	bh = head;
2213	block_start = 0;
2214	do {
2215		block_end = block_start + blocksize;
2216		if (block_end > from && block_start < to) {
2217			if (!buffer_uptodate(bh)) {
2218				ret = false;
2219				break;
2220			}
2221			if (block_end >= to)
2222				break;
2223		}
2224		block_start = block_end;
2225		bh = bh->b_this_page;
2226	} while (bh != head);
2227
2228	return ret;
2229}
2230EXPORT_SYMBOL(block_is_partially_uptodate);
2231
2232/*
2233 * Generic "read_folio" function for block devices that have the normal
2234 * get_block functionality. This is most of the block device filesystems.
2235 * Reads the folio asynchronously --- the unlock_buffer() and
2236 * set/clear_buffer_uptodate() functions propagate buffer state into the
2237 * folio once IO has completed.
2238 */
2239int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2240{
2241	struct inode *inode = folio->mapping->host;
2242	sector_t iblock, lblock;
2243	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2244	unsigned int blocksize, bbits;
2245	int nr, i;
2246	int fully_mapped = 1;
2247	bool page_error = false;
2248
2249	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2250
2251	head = create_page_buffers(&folio->page, inode, 0);
2252	blocksize = head->b_size;
2253	bbits = block_size_bits(blocksize);
2254
2255	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2256	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
2257	bh = head;
2258	nr = 0;
2259	i = 0;
2260
2261	do {
2262		if (buffer_uptodate(bh))
2263			continue;
2264
2265		if (!buffer_mapped(bh)) {
2266			int err = 0;
2267
2268			fully_mapped = 0;
2269			if (iblock < lblock) {
2270				WARN_ON(bh->b_size != blocksize);
2271				err = get_block(inode, iblock, bh, 0);
2272				if (err) {
2273					folio_set_error(folio);
2274					page_error = true;
2275				}
2276			}
2277			if (!buffer_mapped(bh)) {
2278				folio_zero_range(folio, i * blocksize,
2279						blocksize);
2280				if (!err)
2281					set_buffer_uptodate(bh);
2282				continue;
2283			}
2284			/*
2285			 * get_block() might have updated the buffer
2286			 * synchronously
2287			 */
2288			if (buffer_uptodate(bh))
2289				continue;
2290		}
2291		arr[nr++] = bh;
2292	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2293
2294	if (fully_mapped)
2295		folio_set_mappedtodisk(folio);
2296
2297	if (!nr) {
2298		/*
2299		 * All buffers are uptodate - we can set the folio uptodate
2300		 * as well. But not if get_block() returned an error.
2301		 */
2302		if (!page_error)
2303			folio_mark_uptodate(folio);
2304		folio_unlock(folio);
2305		return 0;
2306	}
2307
2308	/* Stage two: lock the buffers */
2309	for (i = 0; i < nr; i++) {
2310		bh = arr[i];
2311		lock_buffer(bh);
2312		mark_buffer_async_read(bh);
2313	}
2314
2315	/*
2316	 * Stage 3: start the IO.  Check for uptodateness
2317	 * inside the buffer lock in case another process reading
2318	 * the underlying blockdev brought it uptodate (the sct fix).
2319	 */
2320	for (i = 0; i < nr; i++) {
2321		bh = arr[i];
2322		if (buffer_uptodate(bh))
2323			end_buffer_async_read(bh, 1);
2324		else
2325			submit_bh(REQ_OP_READ, bh);
2326	}
2327	return 0;
2328}
2329EXPORT_SYMBOL(block_read_full_folio);
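
/*
 * Usage sketch (assumed): most block-based filesystems implement
 * ->read_folio as a one-line wrapper around this function, supplying
 * their own block-mapping callback; myfs_get_block is hypothetical.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}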
2330
2331/* Utility function for filesystems that need to do work on expanding
2332 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2333 * deal with the hole.
2334 */
2335int generic_cont_expand_simple(struct inode *inode, loff_t size)
2336{
2337	struct address_space *mapping = inode->i_mapping;
2338	const struct address_space_operations *aops = mapping->a_ops;
2339	struct page *page;
2340	void *fsdata = NULL;
2341	int err;
2342
2343	err = inode_newsize_ok(inode, size);
2344	if (err)
2345		goto out;
2346
2347	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2348	if (err)
2349		goto out;
2350
2351	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2352	BUG_ON(err > 0);
2353
2354out:
2355	return err;
2356}
2357EXPORT_SYMBOL(generic_cont_expand_simple);
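
/*
 * Usage sketch (assumed): an expanding truncate in a hypothetical
 * ->setattr path; the timestamp handling is illustrative.
 */
static int myfs_expand(struct inode *inode, loff_t new_size)
{
	int err = generic_cont_expand_simple(inode, new_size);

	if (!err) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		mark_inode_dirty(inode);
	}
	return err;
}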
2358
2359static int cont_expand_zero(struct file *file, struct address_space *mapping,
2360			    loff_t pos, loff_t *bytes)
2361{
2362	struct inode *inode = mapping->host;
2363	const struct address_space_operations *aops = mapping->a_ops;
2364	unsigned int blocksize = i_blocksize(inode);
2365	struct page *page;
2366	void *fsdata = NULL;
2367	pgoff_t index, curidx;
2368	loff_t curpos;
2369	unsigned zerofrom, offset, len;
2370	int err = 0;
2371
2372	index = pos >> PAGE_SHIFT;
2373	offset = pos & ~PAGE_MASK;
2374
2375	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2376		zerofrom = curpos & ~PAGE_MASK;
2377		if (zerofrom & (blocksize-1)) {
2378			*bytes |= (blocksize-1);
2379			(*bytes)++;
2380		}
2381		len = PAGE_SIZE - zerofrom;
2382
2383		err = aops->write_begin(file, mapping, curpos, len,
2384					    &page, &fsdata);
2385		if (err)
2386			goto out;
2387		zero_user(page, zerofrom, len);
2388		err = aops->write_end(file, mapping, curpos, len, len,
2389						page, fsdata);
2390		if (err < 0)
2391			goto out;
2392		BUG_ON(err != len);
2393		err = 0;
2394
2395		balance_dirty_pages_ratelimited(mapping);
2396
2397		if (fatal_signal_pending(current)) {
2398			err = -EINTR;
2399			goto out;
2400		}
2401	}
2402
2403	/* page covers the boundary, find the boundary offset */
2404	if (index == curidx) {
2405		zerofrom = curpos & ~PAGE_MASK;
2406		/* if we will expand the thing last block will be filled */
2407		if (offset <= zerofrom) {
2408			goto out;
2409		}
2410		if (zerofrom & (blocksize-1)) {
2411			*bytes |= (blocksize-1);
2412			(*bytes)++;
2413		}
2414		len = offset - zerofrom;
2415
2416		err = aops->write_begin(file, mapping, curpos, len,
2417					    &page, &fsdata);
2418		if (err)
2419			goto out;
2420		zero_user(page, zerofrom, len);
2421		err = aops->write_end(file, mapping, curpos, len, len,
2422						page, fsdata);
2423		if (err < 0)
2424			goto out;
2425		BUG_ON(err != len);
2426		err = 0;
2427	}
2428out:
2429	return err;
2430}
2431
2432/*
2433 * For moronic filesystems that do not allow holes in files.
2434 * We may have to extend the file.
2435 */
2436int cont_write_begin(struct file *file, struct address_space *mapping,
2437			loff_t pos, unsigned len,
2438			struct page **pagep, void **fsdata,
2439			get_block_t *get_block, loff_t *bytes)
2440{
2441	struct inode *inode = mapping->host;
2442	unsigned int blocksize = i_blocksize(inode);
2443	unsigned int zerofrom;
2444	int err;
2445
2446	err = cont_expand_zero(file, mapping, pos, bytes);
2447	if (err)
2448		return err;
2449
2450	zerofrom = *bytes & ~PAGE_MASK;
2451	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2452		*bytes |= (blocksize-1);
2453		(*bytes)++;
2454	}
2455
2456	return block_write_begin(mapping, pos, len, pagep, get_block);
2457}
2458EXPORT_SYMBOL(cont_write_begin);
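
/*
 * Usage sketch (assumed): a ->write_begin for a filesystem that cannot
 * represent holes.  The myfs_inode_info structure and its i_zeroed_up_to
 * field (how far data has been initialised on disk) are hypothetical;
 * cont_write_begin() zeroes everything between that mark and @pos.
 */
struct myfs_inode_info {
	loff_t		i_zeroed_up_to;
	struct inode	vfs_inode;
};
#define MYFS_I(inode) container_of(inode, struct myfs_inode_info, vfs_inode)

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->i_zeroed_up_to);
}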
2459
2460int block_commit_write(struct page *page, unsigned from, unsigned to)
2461{
2462	struct inode *inode = page->mapping->host;
2463	__block_commit_write(inode, page, from, to);
2464	return 0;
2465}
2466EXPORT_SYMBOL(block_commit_write);
2467
2468/*
2469 * block_page_mkwrite() is not allowed to change the file size as it gets
2470 * called from a page fault handler when a page is first dirtied. Hence we must
2471 * be careful to check for EOF conditions here. We set the page up correctly
2472 * for a written page which means we get ENOSPC checking when writing into
2473 * holes and correct delalloc and unwritten extent mapping on filesystems that
2474 * support these features.
2475 *
2476 * We are not allowed to take the i_mutex here so we have to play games to
2477 * protect against truncate races as the page could now be beyond EOF.  Because
2478 * truncate writes the inode size before removing pages, once we have the
2479 * page lock we can determine safely if the page is beyond EOF. If it is not
2480 * beyond EOF, then the page is guaranteed safe against truncation until we
2481 * unlock the page.
2482 *
2483 * Direct callers of this function should protect against filesystem freezing
2484 * using sb_start_pagefault() - sb_end_pagefault() functions.
2485 */
2486int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2487			 get_block_t get_block)
2488{
2489	struct page *page = vmf->page;
2490	struct inode *inode = file_inode(vma->vm_file);
2491	unsigned long end;
2492	loff_t size;
2493	int ret;
2494
2495	lock_page(page);
2496	size = i_size_read(inode);
2497	if ((page->mapping != inode->i_mapping) ||
2498	    (page_offset(page) > size)) {
2499		/* We overload EFAULT to mean page got truncated */
2500		ret = -EFAULT;
2501		goto out_unlock;
2502	}
2503
2504	/* page is wholly or partially inside EOF */
2505	if (((page->index + 1) << PAGE_SHIFT) > size)
2506		end = size & ~PAGE_MASK;
2507	else
2508		end = PAGE_SIZE;
2509
2510	ret = __block_write_begin(page, 0, end, get_block);
2511	if (!ret)
2512		ret = block_commit_write(page, 0, end);
2513
2514	if (unlikely(ret < 0))
2515		goto out_unlock;
2516	set_page_dirty(page);
2517	wait_for_stable_page(page);
2518	return 0;
2519out_unlock:
2520	unlock_page(page);
2521	return ret;
2522}
2523EXPORT_SYMBOL(block_page_mkwrite);
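
/*
 * Usage sketch (assumed): a ->page_mkwrite handler.  As the comment
 * above requires, freeze protection is taken with sb_start_pagefault();
 * block_page_mkwrite_return() from <linux/buffer_head.h> converts the
 * -errno result into a vm_fault_t.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}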
2524
2525int block_truncate_page(struct address_space *mapping,
2526			loff_t from, get_block_t *get_block)
2527{
2528	pgoff_t index = from >> PAGE_SHIFT;
2529	unsigned offset = from & (PAGE_SIZE-1);
2530	unsigned blocksize;
2531	sector_t iblock;
2532	unsigned length, pos;
2533	struct inode *inode = mapping->host;
2534	struct page *page;
2535	struct buffer_head *bh;
2536	int err;
2537
2538	blocksize = i_blocksize(inode);
2539	length = offset & (blocksize - 1);
2540
2541	/* Block boundary? Nothing to do */
2542	if (!length)
2543		return 0;
2544
2545	length = blocksize - length;
2546	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2547
2548	page = grab_cache_page(mapping, index);
2549	err = -ENOMEM;
2550	if (!page)
2551		goto out;
2552
2553	if (!page_has_buffers(page))
2554		create_empty_buffers(page, blocksize, 0);
2555
2556	/* Find the buffer that contains "offset" */
2557	bh = page_buffers(page);
2558	pos = blocksize;
2559	while (offset >= pos) {
2560		bh = bh->b_this_page;
2561		iblock++;
2562		pos += blocksize;
2563	}
2564
2565	err = 0;
2566	if (!buffer_mapped(bh)) {
2567		WARN_ON(bh->b_size != blocksize);
2568		err = get_block(inode, iblock, bh, 0);
2569		if (err)
2570			goto unlock;
2571		/* unmapped? It's a hole - nothing to do */
2572		if (!buffer_mapped(bh))
2573			goto unlock;
2574	}
2575
2576	/* Ok, it's mapped. Make sure it's up-to-date */
2577	if (PageUptodate(page))
2578		set_buffer_uptodate(bh);
2579
2580	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2581		err = bh_read(bh, 0);
2582		/* Uhhuh. Read error. Complain and punt. */
2583		if (err < 0)
2584			goto unlock;
2585	}
2586
2587	zero_user(page, offset, length);
2588	mark_buffer_dirty(bh);
2589	err = 0;
2590
2591unlock:
2592	unlock_page(page);
2593	put_page(page);
2594out:
2595	return err;
2596}
2597EXPORT_SYMBOL(block_truncate_page);
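
/*
 * Usage sketch (assumed): a shrinking truncate path calls this first to
 * zero the tail of the last remaining block before freeing blocks, so a
 * later extension does not expose stale data.
 */
static int myfs_truncate_tail(struct inode *inode, loff_t newsize)
{
	return block_truncate_page(inode->i_mapping, newsize,
				   myfs_get_block);
}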
2598
2599/*
2600 * The generic ->writepage function for buffer-backed address_spaces
2601 */
2602int block_write_full_page(struct page *page, get_block_t *get_block,
2603			struct writeback_control *wbc)
2604{
2605	struct inode * const inode = page->mapping->host;
2606	loff_t i_size = i_size_read(inode);
2607	const pgoff_t end_index = i_size >> PAGE_SHIFT;
2608	unsigned offset;
2609
2610	/* Is the page fully inside i_size? */
2611	if (page->index < end_index)
2612		return __block_write_full_page(inode, page, get_block, wbc,
2613					       end_buffer_async_write);
2614
2615	/* Is the page fully outside i_size? (truncate in progress) */
2616	offset = i_size & (PAGE_SIZE-1);
2617	if (page->index >= end_index+1 || !offset) {
2618		unlock_page(page);
2619		return 0; /* don't care */
2620	}
2621
2622	/*
2623	 * The page straddles i_size.  It must be zeroed out on each and every
2624	 * writepage invocation because it may be mmapped.  "A file is mapped
2625	 * in multiples of the page size.  For a file that is not a multiple of
2626	 * the  page size, the remaining memory is zeroed when mapped, and
2627	 * writes to that region are not written out to the file."
2628	 */
2629	zero_user_segment(page, offset, PAGE_SIZE);
2630	return __block_write_full_page(inode, page, get_block, wbc,
2631							end_buffer_async_write);
2632}
2633EXPORT_SYMBOL(block_write_full_page);
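
/*
 * Usage sketch (assumed): the classic one-line ->writepage wrapper.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}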
2634
2635sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2636			    get_block_t *get_block)
2637{
2638	struct inode *inode = mapping->host;
2639	struct buffer_head tmp = {
2640		.b_size = i_blocksize(inode),
2641	};
2642
2643	get_block(inode, block, &tmp, 0);
2644	return tmp.b_blocknr;
2645}
2646EXPORT_SYMBOL(generic_block_bmap);
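
/*
 * Usage sketch (assumed): ->bmap, as used by the FIBMAP ioctl, is
 * usually just this wrapper around generic_block_bmap().
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}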
2647
2648static void end_bio_bh_io_sync(struct bio *bio)
2649{
2650	struct buffer_head *bh = bio->bi_private;
2651
2652	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2653		set_bit(BH_Quiet, &bh->b_state);
2654
2655	bh->b_end_io(bh, !bio->bi_status);
2656	bio_put(bio);
2657}
2658
2659static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2660			  struct writeback_control *wbc)
2661{
2662	const enum req_op op = opf & REQ_OP_MASK;
2663	struct bio *bio;
2664
2665	BUG_ON(!buffer_locked(bh));
2666	BUG_ON(!buffer_mapped(bh));
2667	BUG_ON(!bh->b_end_io);
2668	BUG_ON(buffer_delay(bh));
2669	BUG_ON(buffer_unwritten(bh));
2670
2671	/*
2672	 * Only clear out a write error when rewriting
2673	 */
2674	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2675		clear_buffer_write_io_error(bh);
2676
2677	if (buffer_meta(bh))
2678		opf |= REQ_META;
2679	if (buffer_prio(bh))
2680		opf |= REQ_PRIO;
2681
2682	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2683
2684	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2685
2686	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2687
2688	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2689	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
2690
2691	bio->bi_end_io = end_bio_bh_io_sync;
2692	bio->bi_private = bh;
2693
2694	/* Take care of bh's that straddle the end of the device */
2695	guard_bio_eod(bio);
2696
2697	if (wbc) {
2698		wbc_init_bio(wbc, bio);
2699		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2700	}
2701
2702	submit_bio(bio);
2703}
2704
2705void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2706{
2707	submit_bh_wbc(opf, bh, NULL);
2708}
2709EXPORT_SYMBOL(submit_bh);
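
/*
 * Usage sketch (assumed): the classic synchronous read built on
 * submit_bh(), essentially what __bh_read() later in this file does.
 * The caller must hold a reference on @bh; end_buffer_read_sync()
 * drops the extra reference taken here.
 */
static int myfs_bh_read_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}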
2710
2711void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2712{
2713	lock_buffer(bh);
2714	if (!test_clear_buffer_dirty(bh)) {
2715		unlock_buffer(bh);
2716		return;
2717	}
2718	bh->b_end_io = end_buffer_write_sync;
2719	get_bh(bh);
2720	submit_bh(REQ_OP_WRITE | op_flags, bh);
2721}
2722EXPORT_SYMBOL(write_dirty_buffer);
2723
2724/*
2725 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2726 * and then start new I/O and then wait upon it.  The caller must have a ref on
2727 * the buffer_head.
2728 */
2729int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2730{
2731	WARN_ON(atomic_read(&bh->b_count) < 1);
2732	lock_buffer(bh);
2733	if (test_clear_buffer_dirty(bh)) {
2734		/*
2735		 * The bh should be mapped, but it might not be if the
2736		 * device was hot-removed. Not much we can do but fail the I/O.
2737		 */
2738		if (!buffer_mapped(bh)) {
2739			unlock_buffer(bh);
2740			return -EIO;
2741		}
2742
2743		get_bh(bh);
2744		bh->b_end_io = end_buffer_write_sync;
2745		submit_bh(REQ_OP_WRITE | op_flags, bh);
2746		wait_on_buffer(bh);
2747		if (!buffer_uptodate(bh))
2748			return -EIO;
2749	} else {
2750		unlock_buffer(bh);
2751	}
2752	return 0;
2753}
2754EXPORT_SYMBOL(__sync_dirty_buffer);
2755
2756int sync_dirty_buffer(struct buffer_head *bh)
2757{
2758	return __sync_dirty_buffer(bh, REQ_SYNC);
2759}
2760EXPORT_SYMBOL(sync_dirty_buffer);
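
/*
 * Usage sketch (assumed): a data-integrity write of a single metadata
 * block, e.g. a superblock; sync_dirty_buffer() waits for completion
 * and reports -EIO on failure.
 */
static int myfs_commit_super(struct buffer_head *sb_bh)
{
	mark_buffer_dirty(sb_bh);
	return sync_dirty_buffer(sb_bh);
}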
2761
2762/*
2763 * try_to_free_buffers() checks if all the buffers on this particular folio
2764 * are unused, and releases them if so.
2765 *
2766 * Exclusion against try_to_free_buffers may be obtained by either
2767 * locking the folio or by holding its mapping's private_lock.
2768 *
2769 * If the folio is dirty but all the buffers are clean then we need to
2770 * be sure to mark the folio clean as well.  This is because the folio
2771 * may be against a block device, and a later reattachment of buffers
2772 * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2773 * filesystem data on the same device.
2774 *
2775 * The same applies to regular filesystem folios: if all the buffers are
2776 * clean then we set the folio clean and proceed.  To do that, we require
2777 * total exclusion from block_dirty_folio().  That is obtained with
2778 * private_lock.
2779 *
2780 * try_to_free_buffers() is non-blocking.
2781 */
2782static inline int buffer_busy(struct buffer_head *bh)
2783{
2784	return atomic_read(&bh->b_count) |
2785		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2786}
2787
2788static bool
2789drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2790{
2791	struct buffer_head *head = folio_buffers(folio);
2792	struct buffer_head *bh;
2793
2794	bh = head;
2795	do {
2796		if (buffer_busy(bh))
2797			goto failed;
2798		bh = bh->b_this_page;
2799	} while (bh != head);
2800
2801	do {
2802		struct buffer_head *next = bh->b_this_page;
2803
2804		if (bh->b_assoc_map)
2805			__remove_assoc_queue(bh);
2806		bh = next;
2807	} while (bh != head);
2808	*buffers_to_free = head;
2809	folio_detach_private(folio);
2810	return true;
2811failed:
2812	return false;
2813}
2814
2815bool try_to_free_buffers(struct folio *folio)
2816{
2817	struct address_space * const mapping = folio->mapping;
2818	struct buffer_head *buffers_to_free = NULL;
2819	bool ret = false;
2820
2821	BUG_ON(!folio_test_locked(folio));
2822	if (folio_test_writeback(folio))
2823		return false;
2824
2825	if (mapping == NULL) {		/* can this still happen? */
2826		ret = drop_buffers(folio, &buffers_to_free);
2827		goto out;
2828	}
2829
2830	spin_lock(&mapping->private_lock);
2831	ret = drop_buffers(folio, &buffers_to_free);
2832
2833	/*
2834	 * If the filesystem writes its buffers by hand (eg ext3)
2835	 * then we can have clean buffers against a dirty folio.  We
2836	 * clean the folio here; otherwise the VM will never notice
2837	 * that the filesystem did any IO at all.
2838	 *
2839	 * Also, during truncate, discard_buffer will have marked all
2840	 * the folio's buffers clean.  We discover that here and clean
2841	 * the folio also.
2842	 *
2843	 * private_lock must be held over this entire operation in order
2844	 * to synchronise against block_dirty_folio and prevent the
2845	 * dirty bit from being lost.
2846	 */
2847	if (ret)
2848		folio_cancel_dirty(folio);
2849	spin_unlock(&mapping->private_lock);
2850out:
2851	if (buffers_to_free) {
2852		struct buffer_head *bh = buffers_to_free;
2853
2854		do {
2855			struct buffer_head *next = bh->b_this_page;
2856			free_buffer_head(bh);
2857			bh = next;
2858		} while (bh != buffers_to_free);
2859	}
2860	return ret;
2861}
2862EXPORT_SYMBOL(try_to_free_buffers);
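
/*
 * Usage sketch (assumed): try_to_free_buffers() is the usual backend
 * for ->release_folio; a real filesystem may first veto the release
 * based on its own state (journalling filesystems do).
 */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}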
2863
2864/*
2865 * Buffer-head allocation
2866 */
2867static struct kmem_cache *bh_cachep __read_mostly;
2868
2869/*
2870 * Once the number of bh's in the machine exceeds this level, we start
2871 * stripping them in writeback.
2872 */
2873static unsigned long max_buffer_heads;
2874
2875int buffer_heads_over_limit;
2876
2877struct bh_accounting {
2878	int nr;			/* Number of live bh's */
2879	int ratelimit;		/* Limit cacheline bouncing */
2880};
2881
2882static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2883
2884static void recalc_bh_state(void)
2885{
2886	int i;
2887	int tot = 0;
2888
2889	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2890		return;
2891	__this_cpu_write(bh_accounting.ratelimit, 0);
2892	for_each_online_cpu(i)
2893		tot += per_cpu(bh_accounting, i).nr;
2894	buffer_heads_over_limit = (tot > max_buffer_heads);
2895}
2896
2897struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2898{
2899	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2900	if (ret) {
2901		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2902		spin_lock_init(&ret->b_uptodate_lock);
2903		preempt_disable();
2904		__this_cpu_inc(bh_accounting.nr);
2905		recalc_bh_state();
2906		preempt_enable();
2907	}
2908	return ret;
2909}
2910EXPORT_SYMBOL(alloc_buffer_head);
2911
2912void free_buffer_head(struct buffer_head *bh)
2913{
2914	BUG_ON(!list_empty(&bh->b_assoc_buffers));
2915	kmem_cache_free(bh_cachep, bh);
2916	preempt_disable();
2917	__this_cpu_dec(bh_accounting.nr);
2918	recalc_bh_state();
2919	preempt_enable();
2920}
2921EXPORT_SYMBOL(free_buffer_head);
2922
2923static int buffer_exit_cpu_dead(unsigned int cpu)
2924{
2925	int i;
2926	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2927
2928	for (i = 0; i < BH_LRU_SIZE; i++) {
2929		brelse(b->bhs[i]);
2930		b->bhs[i] = NULL;
2931	}
2932	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
2933	per_cpu(bh_accounting, cpu).nr = 0;
2934	return 0;
2935}
2936
2937/**
2938 * bh_uptodate_or_lock - Test whether the buffer is uptodate
2939 * @bh: struct buffer_head
2940 *
2941 * Return 1 if the buffer is up-to-date and 0,
2942 * with the buffer locked, if not.
2943 */
2944int bh_uptodate_or_lock(struct buffer_head *bh)
2945{
2946	if (!buffer_uptodate(bh)) {
2947		lock_buffer(bh);
2948		if (!buffer_uptodate(bh))
2949			return 0;
2950		unlock_buffer(bh);
2951	}
2952	return 1;
2953}
2954EXPORT_SYMBOL(bh_uptodate_or_lock);
2955
2956/**
2957 * __bh_read - Submit read for a locked buffer
2958 * @bh: struct buffer_head
2959 * @op_flags: extra REQ_* flags to apply in addition to REQ_OP_READ
2960 * @wait: wait until reading finishes
2961 *
2962 * Returns zero on success or when not waiting, and -EIO on error.
2963 */
2964int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
2965{
2966	int ret = 0;
2967
2968	BUG_ON(!buffer_locked(bh));
2969
2970	get_bh(bh);
2971	bh->b_end_io = end_buffer_read_sync;
2972	submit_bh(REQ_OP_READ | op_flags, bh);
2973	if (wait) {
2974		wait_on_buffer(bh);
2975		if (!buffer_uptodate(bh))
2976			ret = -EIO;
2977	}
2978	return ret;
2979}
2980EXPORT_SYMBOL(__bh_read);
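
/*
 * Usage sketch (assumed): bh_uptodate_or_lock() and __bh_read() pair up
 * as in the bh_read() helper from <linux/buffer_head.h>: read the
 * buffer only if it is not already up to date.
 */
static int myfs_read_block(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;	/* already uptodate, nothing submitted */
	/* buffer is now locked; submit the read and wait for it */
	return __bh_read(bh, 0, true);
}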
2981
2982/**
2983 * __bh_read_batch - Submit read for a batch of unlocked buffers
2984 * @nr: number of buffers in the batch
2985 * @bhs: a batch of struct buffer_head
2986 * @op_flags: extra REQ_* flags to apply in addition to REQ_OP_READ
2987 * @force_lock: if set, wait for the lock on each buffer; otherwise skip
2988 *              any buffer that cannot be locked without blocking.
2989 *
2990 * Submits the reads asynchronously; unlike __bh_read(), returns nothing.
2991 */
2992void __bh_read_batch(int nr, struct buffer_head *bhs[],
2993		     blk_opf_t op_flags, bool force_lock)
2994{
2995	int i;
2996
2997	for (i = 0; i < nr; i++) {
2998		struct buffer_head *bh = bhs[i];
2999
3000		if (buffer_uptodate(bh))
3001			continue;
3002
3003		if (force_lock)
3004			lock_buffer(bh);
3005		else
3006			if (!trylock_buffer(bh))
3007				continue;
3008
3009		if (buffer_uptodate(bh)) {
3010			unlock_buffer(bh);
3011			continue;
3012		}
3013
3014		bh->b_end_io = end_buffer_read_sync;
3015		get_bh(bh);
3016		submit_bh(REQ_OP_READ | op_flags, bh);
3017	}
3018}
3019EXPORT_SYMBOL(__bh_read_batch);
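
/*
 * Usage sketch (assumed): batched, non-blocking metadata readahead, as
 * done by the bh_readahead_batch() wrapper in <linux/buffer_head.h>;
 * buffers that are already locked are simply skipped.
 */
static void myfs_readahead_blocks(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
}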
3020
3021void __init buffer_init(void)
3022{
3023	unsigned long nrpages;
3024	int ret;
3025
3026	bh_cachep = kmem_cache_create("buffer_head",
3027			sizeof(struct buffer_head), 0,
3028				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3029				SLAB_MEM_SPREAD),
3030				NULL);
3031
3032	/*
3033	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3034	 */
3035	nrpages = (nr_free_buffer_pages() * 10) / 100;
3036	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3037	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3038					NULL, buffer_exit_cpu_dead);
3039	WARN_ON(ret < 0);
3040}
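
/*
 * Worked example of the sizing above (figures illustrative): with 4 KiB
 * pages and sizeof(struct buffer_head) around 100 bytes, one page holds
 * roughly 40 buffer heads.  On a machine with 1 GiB of ZONE_NORMAL
 * (~262144 pages), nrpages is ~26214 and max_buffer_heads comes out at
 * around one million before writeback starts stripping buffer heads.
 */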