   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/mm/filemap.c
   4 *
   5 * Copyright (C) 1994-1999  Linus Torvalds
   6 */
   7
   8/*
   9 * This file handles the generic file mmap semantics used by
  10 * most "normal" filesystems (but you don't /have/ to use this:
  11 * the NFS filesystem used to do this differently, for example)
  12 */
  13#include <linux/export.h>
  14#include <linux/compiler.h>
  15#include <linux/dax.h>
  16#include <linux/fs.h>
  17#include <linux/sched/signal.h>
  18#include <linux/uaccess.h>
  19#include <linux/capability.h>
  20#include <linux/kernel_stat.h>
  21#include <linux/gfp.h>
  22#include <linux/mm.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/mman.h>
  26#include <linux/pagemap.h>
  27#include <linux/file.h>
  28#include <linux/uio.h>
  29#include <linux/error-injection.h>
  30#include <linux/hash.h>
  31#include <linux/writeback.h>
  32#include <linux/backing-dev.h>
  33#include <linux/pagevec.h>
  34#include <linux/security.h>
  35#include <linux/cpuset.h>
  36#include <linux/hugetlb.h>
  37#include <linux/memcontrol.h>
  38#include <linux/shmem_fs.h>
  39#include <linux/rmap.h>
  40#include <linux/delayacct.h>
  41#include <linux/psi.h>
  42#include <linux/ramfs.h>
  43#include <linux/page_idle.h>
  44#include <linux/migrate.h>
  45#include <asm/pgalloc.h>
  46#include <asm/tlbflush.h>
  47#include "internal.h"
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/filemap.h>
  51
  52/*
  53 * FIXME: remove all knowledge of the buffer layer from the core VM
  54 */
  55#include <linux/buffer_head.h> /* for try_to_free_buffers */
  56
  57#include <asm/mman.h>
  58
  59/*
  60 * Shared mappings implemented 30.11.1994. It's not fully working yet,
  61 * though.
  62 *
  63 * Shared mappings now work. 15.8.1995  Bruno.
  64 *
  65 * finished 'unifying' the page and buffer cache and SMP-threaded the
  66 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  67 *
  68 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  69 */
  70
  71/*
  72 * Lock ordering:
  73 *
  74 *  ->i_mmap_rwsem		(truncate_pagecache)
  75 *    ->private_lock		(__free_pte->block_dirty_folio)
  76 *      ->swap_lock		(exclusive_swap_page, others)
  77 *        ->i_pages lock
  78 *
  79 *  ->i_rwsem
  80 *    ->invalidate_lock		(acquired by fs in truncate path)
  81 *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
  82 *
  83 *  ->mmap_lock
  84 *    ->i_mmap_rwsem
  85 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  86 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
  87 *
  88 *  ->mmap_lock
  89 *    ->invalidate_lock		(filemap_fault)
  90 *      ->lock_page		(filemap_fault, access_process_vm)
  91 *
  92 *  ->i_rwsem			(generic_perform_write)
  93 *    ->mmap_lock		(fault_in_readable->do_page_fault)
  94 *
  95 *  bdi->wb.list_lock
  96 *    sb_lock			(fs/fs-writeback.c)
  97 *    ->i_pages lock		(__sync_single_inode)
  98 *
  99 *  ->i_mmap_rwsem
 100 *    ->anon_vma.lock		(vma_adjust)
 101 *
 102 *  ->anon_vma.lock
 103 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 104 *
 105 *  ->page_table_lock or pte_lock
 106 *    ->swap_lock		(try_to_unmap_one)
 107 *    ->private_lock		(try_to_unmap_one)
 108 *    ->i_pages lock		(try_to_unmap_one)
 109 *    ->lruvec->lru_lock	(follow_page->mark_page_accessed)
 110 *    ->lruvec->lru_lock	(check_pte_range->isolate_lru_page)
 111 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 112 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 113 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 114 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 115 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 116 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 117 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 118 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 119 *
 120 * ->i_mmap_rwsem
 121 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 122 */
 123
 124static void page_cache_delete(struct address_space *mapping,
 125				   struct folio *folio, void *shadow)
 126{
 127	XA_STATE(xas, &mapping->i_pages, folio->index);
 128	long nr = 1;
 129
 130	mapping_set_update(&xas, mapping);
 131
 132	/* hugetlb pages are represented by a single entry in the xarray */
 133	if (!folio_test_hugetlb(folio)) {
 134		xas_set_order(&xas, folio->index, folio_order(folio));
 135		nr = folio_nr_pages(folio);
 136	}
 137
 138	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 139
 140	xas_store(&xas, shadow);
 141	xas_init_marks(&xas);
 142
 143	folio->mapping = NULL;
 144	/* Leave page->index set: truncation lookup relies upon it */
 145	mapping->nrpages -= nr;
 146}
 147
 148static void filemap_unaccount_folio(struct address_space *mapping,
 149		struct folio *folio)
 150{
 151	long nr;
 152
 153	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
 154	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
 155		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
 156			 current->comm, folio_pfn(folio));
 157		dump_page(&folio->page, "still mapped when deleted");
 158		dump_stack();
 159		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 160
 161		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
 162			int mapcount = page_mapcount(&folio->page);
 163
 164			if (folio_ref_count(folio) >= mapcount + 2) {
 165				/*
 166				 * All vmas have already been torn down, so it's
 167				 * a good bet that actually the page is unmapped
 168				 * and we'd rather not leak it: if we're wrong,
 169				 * another bad page check should catch it later.
 170				 */
 171				page_mapcount_reset(&folio->page);
 172				folio_ref_sub(folio, mapcount);
 173			}
 174		}
 175	}
 176
 177	/* hugetlb folios do not participate in page cache accounting. */
 178	if (folio_test_hugetlb(folio))
 179		return;
 180
 181	nr = folio_nr_pages(folio);
 182
 183	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 184	if (folio_test_swapbacked(folio)) {
 185		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
 186		if (folio_test_pmd_mappable(folio))
 187			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
 188	} else if (folio_test_pmd_mappable(folio)) {
 189		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
 190		filemap_nr_thps_dec(mapping);
 191	}
 192
 193	/*
 194	 * At this point folio must be either written or cleaned by
 195	 * truncate.  Dirty folio here signals a bug and loss of
 196	 * unwritten data - on ordinary filesystems.
 197	 *
 198	 * But it's harmless on in-memory filesystems like tmpfs; and can
 199	 * occur when a driver which did get_user_pages() sets page dirty
 200	 * before putting it, while the inode is being finally evicted.
 201	 *
 202	 * Below fixes dirty accounting after removing the folio entirely
 203	 * but leaves the dirty flag set: it has no effect for truncated
 204	 * folio and anyway will be cleared before returning folio to
 205	 * buddy allocator.
 206	 */
 207	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
 208			 mapping_can_writeback(mapping)))
 209		folio_account_cleaned(folio, inode_to_wb(mapping->host));
 210}
 211
 212/*
 213 * Delete a page from the page cache and free it. Caller has to make
 214 * sure the page is locked and that nobody else uses it - or that usage
 215 * is safe.  The caller must hold the i_pages lock.
 216 */
 217void __filemap_remove_folio(struct folio *folio, void *shadow)
 218{
 219	struct address_space *mapping = folio->mapping;
 220
 221	trace_mm_filemap_delete_from_page_cache(folio);
 222	filemap_unaccount_folio(mapping, folio);
 223	page_cache_delete(mapping, folio, shadow);
 224}
 225
 226void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 227{
 228	void (*free_folio)(struct folio *);
 229	int refs = 1;
 230
 231	free_folio = mapping->a_ops->free_folio;
 232	if (free_folio)
 233		free_folio(folio);
 234
 235	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
 236		refs = folio_nr_pages(folio);
 237	folio_put_refs(folio, refs);
 238}
 239
 240/**
 241 * filemap_remove_folio - Remove folio from page cache.
 242 * @folio: The folio.
 243 *
 244 * This must be called only on folios that are locked and have been
 245 * verified to be in the page cache.  It will never put the folio into
 246 * the free list because the caller has a reference on the page.
 247 */
 248void filemap_remove_folio(struct folio *folio)
 249{
 250	struct address_space *mapping = folio->mapping;
 251
 252	BUG_ON(!folio_test_locked(folio));
 253	spin_lock(&mapping->host->i_lock);
 254	xa_lock_irq(&mapping->i_pages);
 255	__filemap_remove_folio(folio, NULL);
 256	xa_unlock_irq(&mapping->i_pages);
 257	if (mapping_shrinkable(mapping))
 258		inode_add_lru(mapping->host);
 259	spin_unlock(&mapping->host->i_lock);
 260
 261	filemap_free_folio(mapping, folio);
 262}
 263
 264/*
 265 * page_cache_delete_batch - delete several folios from page cache
 266 * @mapping: the mapping to which folios belong
 267 * @fbatch: batch of folios to delete
 268 *
 269 * The function walks over mapping->i_pages and removes folios passed in
 270 * @fbatch from the mapping. The function expects @fbatch to be sorted
 271 * by page index and is optimised for it to be dense.
 272 * It tolerates holes in @fbatch (mapping entries at those indices are not
 273 * modified).
 274 *
 275 * The function expects the i_pages lock to be held.
 276 */
 277static void page_cache_delete_batch(struct address_space *mapping,
 278			     struct folio_batch *fbatch)
 279{
 280	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
 281	long total_pages = 0;
 282	int i = 0;
 283	struct folio *folio;
 284
 285	mapping_set_update(&xas, mapping);
 286	xas_for_each(&xas, folio, ULONG_MAX) {
 287		if (i >= folio_batch_count(fbatch))
 288			break;
 289
 290		/* A swap/dax/shadow entry got inserted? Skip it. */
 291		if (xa_is_value(folio))
 292			continue;
 293		/*
 294		 * A page got inserted in our range? Skip it. We have our
 295		 * pages locked so they are protected from being removed.
 296		 * If we see a page whose index is higher than ours, it
 297		 * means our page has been removed, which shouldn't be
 298		 * possible because we're holding the PageLock.
 299		 */
 300		if (folio != fbatch->folios[i]) {
 301			VM_BUG_ON_FOLIO(folio->index >
 302					fbatch->folios[i]->index, folio);
 303			continue;
 304		}
 305
 306		WARN_ON_ONCE(!folio_test_locked(folio));
 307
 308		folio->mapping = NULL;
 309		/* Leave folio->index set: truncation lookup relies on it */
 310
 311		i++;
 312		xas_store(&xas, NULL);
 313		total_pages += folio_nr_pages(folio);
 314	}
 315	mapping->nrpages -= total_pages;
 316}
 317
 318void delete_from_page_cache_batch(struct address_space *mapping,
 319				  struct folio_batch *fbatch)
 320{
 321	int i;
 322
 323	if (!folio_batch_count(fbatch))
 324		return;
 325
 326	spin_lock(&mapping->host->i_lock);
 327	xa_lock_irq(&mapping->i_pages);
 328	for (i = 0; i < folio_batch_count(fbatch); i++) {
 329		struct folio *folio = fbatch->folios[i];
 330
 331		trace_mm_filemap_delete_from_page_cache(folio);
 332		filemap_unaccount_folio(mapping, folio);
 333	}
 334	page_cache_delete_batch(mapping, fbatch);
 335	xa_unlock_irq(&mapping->i_pages);
 336	if (mapping_shrinkable(mapping))
 337		inode_add_lru(mapping->host);
 338	spin_unlock(&mapping->host->i_lock);
 339
 340	for (i = 0; i < folio_batch_count(fbatch); i++)
 341		filemap_free_folio(mapping, fbatch->folios[i]);
 342}
 343
 344int filemap_check_errors(struct address_space *mapping)
 345{
 346	int ret = 0;
 347	/* Check for outstanding write errors */
 348	if (test_bit(AS_ENOSPC, &mapping->flags) &&
 349	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 350		ret = -ENOSPC;
 351	if (test_bit(AS_EIO, &mapping->flags) &&
 352	    test_and_clear_bit(AS_EIO, &mapping->flags))
 353		ret = -EIO;
 354	return ret;
 355}
 356EXPORT_SYMBOL(filemap_check_errors);
 357
 358static int filemap_check_and_keep_errors(struct address_space *mapping)
 359{
 360	/* Check for outstanding write errors */
 361	if (test_bit(AS_EIO, &mapping->flags))
 362		return -EIO;
 363	if (test_bit(AS_ENOSPC, &mapping->flags))
 364		return -ENOSPC;
 365	return 0;
 366}
 367
 368/**
 369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 370 * @mapping:	address space structure to write
 371 * @wbc:	the writeback_control controlling the writeout
 372 *
 373 * Call writepages on the mapping using the provided wbc to control the
 374 * writeout.
 375 *
 376 * Return: %0 on success, negative error code otherwise.
 377 */
 378int filemap_fdatawrite_wbc(struct address_space *mapping,
 379			   struct writeback_control *wbc)
 380{
 381	int ret;
 382
 383	if (!mapping_can_writeback(mapping) ||
 384	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 385		return 0;
 386
 387	wbc_attach_fdatawrite_inode(wbc, mapping->host);
 388	ret = do_writepages(mapping, wbc);
 389	wbc_detach_inode(wbc);
 390	return ret;
 391}
 392EXPORT_SYMBOL(filemap_fdatawrite_wbc);
 393
 394/**
 395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 396 * @mapping:	address space structure to write
 397 * @start:	offset in bytes where the range starts
 398 * @end:	offset in bytes where the range ends (inclusive)
 399 * @sync_mode:	enable synchronous operation
 400 *
 401 * Start writeback against all of a mapping's dirty pages that lie
 402 * within the byte offsets <start, end> inclusive.
 403 *
 404 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 405 * opposed to a regular memory cleansing writeback.  The difference between
 406 * these two operations is that if a dirty page/buffer is encountered, it must
 407 * be waited upon, and not just skipped over.
 408 *
 409 * Return: %0 on success, negative error code otherwise.
 410 */
 411int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 412				loff_t end, int sync_mode)
 413{
 414	struct writeback_control wbc = {
 415		.sync_mode = sync_mode,
 416		.nr_to_write = LONG_MAX,
 417		.range_start = start,
 418		.range_end = end,
 419	};
 420
 421	return filemap_fdatawrite_wbc(mapping, &wbc);
 422}
 423
 424static inline int __filemap_fdatawrite(struct address_space *mapping,
 425	int sync_mode)
 426{
 427	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 428}
 429
 430int filemap_fdatawrite(struct address_space *mapping)
 431{
 432	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
 433}
 434EXPORT_SYMBOL(filemap_fdatawrite);
 435
 436int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 437				loff_t end)
 438{
 439	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 440}
 441EXPORT_SYMBOL(filemap_fdatawrite_range);
 442
 443/**
 444 * filemap_flush - mostly a non-blocking flush
 445 * @mapping:	target address_space
 446 *
 447 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 448 * purposes - I/O may not be started against all dirty pages.
 449 *
 450 * Return: %0 on success, negative error code otherwise.
 451 */
 452int filemap_flush(struct address_space *mapping)
 453{
 454	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 455}
 456EXPORT_SYMBOL(filemap_flush);
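
/*
 * Example (illustrative sketch, not part of this file): starting writeback
 * without waiting for it.  A data-integrity caller kicks a specific byte
 * range with filemap_fdatawrite_range(); filemap_flush() is the best-effort,
 * mostly non-blocking variant for the whole mapping.
 */
static int example_start_writeout(struct address_space *mapping,
				  loff_t pos, loff_t count)
{
	if (count <= 0)
		return filemap_flush(mapping);
	return filemap_fdatawrite_range(mapping, pos, pos + count - 1);
}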
 457
 458/**
 459 * filemap_range_has_page - check if a page exists in range.
 460 * @mapping:           address space within which to check
 461 * @start_byte:        offset in bytes where the range starts
 462 * @end_byte:          offset in bytes where the range ends (inclusive)
 463 *
 464 * Find at least one page in the range supplied, usually used to check if
 465 * direct writing in this range will trigger a writeback.
 466 *
 467 * Return: %true if at least one page exists in the specified range,
 468 * %false otherwise.
 469 */
 470bool filemap_range_has_page(struct address_space *mapping,
 471			   loff_t start_byte, loff_t end_byte)
 472{
 473	struct page *page;
 474	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 475	pgoff_t max = end_byte >> PAGE_SHIFT;
 476
 477	if (end_byte < start_byte)
 478		return false;
 479
 480	rcu_read_lock();
 481	for (;;) {
 482		page = xas_find(&xas, max);
 483		if (xas_retry(&xas, page))
 484			continue;
 485		/* Shadow entries don't count */
 486		if (xa_is_value(page))
 487			continue;
 488		/*
 489		 * We don't need to try to pin this page; we're about to
 490		 * release the RCU lock anyway.  It is enough to know that
 491		 * there was a page here recently.
 492		 */
 493		break;
 494	}
 495	rcu_read_unlock();
 496
 497	return page != NULL;
 498}
 499EXPORT_SYMBOL(filemap_range_has_page);
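
/*
 * Example (illustrative sketch, not part of this file): a direct-write path
 * can use filemap_range_has_page() to skip the more expensive flush when
 * nothing is cached over the range it is about to overwrite.
 * filemap_write_and_wait_range() is defined later in this file.
 */
static int example_prepare_direct_write(struct address_space *mapping,
					loff_t pos, loff_t end)
{
	if (!filemap_range_has_page(mapping, pos, end))
		return 0;	/* no cached pages, nothing to write back */
	return filemap_write_and_wait_range(mapping, pos, end);
}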
 500
 501static void __filemap_fdatawait_range(struct address_space *mapping,
 502				     loff_t start_byte, loff_t end_byte)
 503{
 504	pgoff_t index = start_byte >> PAGE_SHIFT;
 505	pgoff_t end = end_byte >> PAGE_SHIFT;
 506	struct pagevec pvec;
 507	int nr_pages;
 508
 509	pagevec_init(&pvec);
 510	while (index <= end) {
 511		unsigned i;
 512
 513		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
 514				end, PAGECACHE_TAG_WRITEBACK);
 515		if (!nr_pages)
 516			break;
 517
 518		for (i = 0; i < nr_pages; i++) {
 519			struct page *page = pvec.pages[i];
 520
 521			wait_on_page_writeback(page);
 522			ClearPageError(page);
 523		}
 524		pagevec_release(&pvec);
 525		cond_resched();
 526	}
 527}
 528
 529/**
 530 * filemap_fdatawait_range - wait for writeback to complete
 531 * @mapping:		address space structure to wait for
 532 * @start_byte:		offset in bytes where the range starts
 533 * @end_byte:		offset in bytes where the range ends (inclusive)
 534 *
 535 * Walk the list of under-writeback pages of the given address space
 536 * in the given range and wait for all of them.  Check error status of
 537 * the address space and return it.
 538 *
 539 * Since the error status of the address space is cleared by this function,
 540 * callers are responsible for checking the return value and handling and/or
 541 * reporting the error.
 542 *
 543 * Return: error status of the address space.
 544 */
 545int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 546			    loff_t end_byte)
 547{
 548	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 549	return filemap_check_errors(mapping);
 550}
 551EXPORT_SYMBOL(filemap_fdatawait_range);
 552
 553/**
 554 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 555 * @mapping:		address space structure to wait for
 556 * @start_byte:		offset in bytes where the range starts
 557 * @end_byte:		offset in bytes where the range ends (inclusive)
 558 *
 559 * Walk the list of under-writeback pages of the given address space in the
 560 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 561 * this function does not clear error status of the address space.
 562 *
 563 * Use this function if callers don't handle errors themselves.  Expected
 564 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 565 * fsfreeze(8)
 566 */
 567int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
 568		loff_t start_byte, loff_t end_byte)
 569{
 570	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 571	return filemap_check_and_keep_errors(mapping);
 572}
 573EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
 574
 575/**
 576 * file_fdatawait_range - wait for writeback to complete
 577 * @file:		file pointing to address space structure to wait for
 578 * @start_byte:		offset in bytes where the range starts
 579 * @end_byte:		offset in bytes where the range ends (inclusive)
 580 *
 581 * Walk the list of under-writeback pages of the address space that file
 582 * refers to, in the given range and wait for all of them.  Check error
 583 * status of the address space vs. the file->f_wb_err cursor and return it.
 584 *
 585 * Since the error status of the file is advanced by this function,
 586 * callers are responsible for checking the return value and handling and/or
 587 * reporting the error.
 588 *
 589 * Return: error status of the address space vs. the file->f_wb_err cursor.
 590 */
 591int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
 592{
 593	struct address_space *mapping = file->f_mapping;
 594
 595	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 596	return file_check_and_advance_wb_err(file);
 597}
 598EXPORT_SYMBOL(file_fdatawait_range);
 599
 600/**
 601 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 602 * @mapping: address space structure to wait for
 603 *
 604 * Walk the list of under-writeback pages of the given address space
 605 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 606 * does not clear error status of the address space.
 607 *
 608 * Use this function if callers don't handle errors themselves.  Expected
 609 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 610 * fsfreeze(8)
 611 *
 612 * Return: error status of the address space.
 613 */
 614int filemap_fdatawait_keep_errors(struct address_space *mapping)
 615{
 616	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
 617	return filemap_check_and_keep_errors(mapping);
 618}
 619EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
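
/*
 * Example (illustrative sketch, not part of this file): a system-wide
 * flusher in the spirit of sync(2) starts writeback and then waits,
 * deliberately leaving any error flags in place so that a later fsync()
 * on the same file still sees them.
 */
static void example_flush_mapping_for_sync(struct address_space *mapping)
{
	filemap_fdatawrite(mapping);
	filemap_fdatawait_keep_errors(mapping);
}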
 620
 621/* Returns true if writeback might be needed or already in progress. */
 622static bool mapping_needs_writeback(struct address_space *mapping)
 623{
 624	return mapping->nrpages;
 625}
 626
 627bool filemap_range_has_writeback(struct address_space *mapping,
 628				 loff_t start_byte, loff_t end_byte)
 629{
 630	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 631	pgoff_t max = end_byte >> PAGE_SHIFT;
 632	struct folio *folio;
 633
 634	if (end_byte < start_byte)
 635		return false;
 636
 637	rcu_read_lock();
 638	xas_for_each(&xas, folio, max) {
 639		if (xas_retry(&xas, folio))
 640			continue;
 641		if (xa_is_value(folio))
 642			continue;
 643		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
 644				folio_test_writeback(folio))
 645			break;
 646	}
 647	rcu_read_unlock();
 648	return folio != NULL;
 649}
 650EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
 651
 652/**
 653 * filemap_write_and_wait_range - write out & wait on a file range
 654 * @mapping:	the address_space for the pages
 655 * @lstart:	offset in bytes where the range starts
 656 * @lend:	offset in bytes where the range ends (inclusive)
 657 *
 658 * Write out and wait upon file offsets lstart->lend, inclusive.
 659 *
 660 * Note that @lend is inclusive (describes the last byte to be written) so
 661 * that this function can be used to write to the very end-of-file (end = -1).
 662 *
 663 * Return: error status of the address space.
 664 */
 665int filemap_write_and_wait_range(struct address_space *mapping,
 666				 loff_t lstart, loff_t lend)
 667{
 668	int err = 0, err2;
 669
 670	if (lend < lstart)
 671		return 0;
 672
 673	if (mapping_needs_writeback(mapping)) {
 674		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 675						 WB_SYNC_ALL);
 676		/*
 677		 * Even if the above returned error, the pages may be
 678		 * written partially (e.g. -ENOSPC), so we wait for it.
 679		 * But the -EIO is special case, it may indicate the worst
 680		 * thing (e.g. bug) happened, so we avoid waiting for it.
 681		 */
 682		if (err != -EIO)
 683			__filemap_fdatawait_range(mapping, lstart, lend);
 684	}
 685	err2 = filemap_check_errors(mapping);
 686	if (!err)
 687		err = err2;
 688	return err;
 689}
 690EXPORT_SYMBOL(filemap_write_and_wait_range);
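
/*
 * Example (illustrative sketch, not part of this file): flushing and
 * waiting on an entire file, e.g. before an operation that must observe
 * stable data.  LLONG_MAX is used for "up to end of file".
 */
static int example_flush_whole_file(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}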
 691
 692void __filemap_set_wb_err(struct address_space *mapping, int err)
 693{
 694	errseq_t eseq = errseq_set(&mapping->wb_err, err);
 695
 696	trace_filemap_set_wb_err(mapping, eseq);
 697}
 698EXPORT_SYMBOL(__filemap_set_wb_err);
 699
 700/**
 701 * file_check_and_advance_wb_err - report wb error (if any) that was previously
  702 * 				   recorded, and advance wb_err to the current one
 703 * @file: struct file on which the error is being reported
 704 *
 705 * When userland calls fsync (or something like nfsd does the equivalent), we
 706 * want to report any writeback errors that occurred since the last fsync (or
 707 * since the file was opened if there haven't been any).
 708 *
 709 * Grab the wb_err from the mapping. If it matches what we have in the file,
 710 * then just quickly return 0. The file is all caught up.
 711 *
 712 * If it doesn't match, then take the mapping value, set the "seen" flag in
 713 * it and try to swap it into place. If it works, or another task beat us
 714 * to it with the new value, then update the f_wb_err and return the error
 715 * portion. The error at this point must be reported via proper channels
 716 * (a'la fsync, or NFS COMMIT operation, etc.).
 717 *
 718 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 719 * value is protected by the f_lock since we must ensure that it reflects
 720 * the latest value swapped in for this file descriptor.
 721 *
 722 * Return: %0 on success, negative error code otherwise.
 723 */
 724int file_check_and_advance_wb_err(struct file *file)
 725{
 726	int err = 0;
 727	errseq_t old = READ_ONCE(file->f_wb_err);
 728	struct address_space *mapping = file->f_mapping;
 729
 730	/* Locklessly handle the common case where nothing has changed */
 731	if (errseq_check(&mapping->wb_err, old)) {
 732		/* Something changed, must use slow path */
 733		spin_lock(&file->f_lock);
 734		old = file->f_wb_err;
 735		err = errseq_check_and_advance(&mapping->wb_err,
 736						&file->f_wb_err);
 737		trace_file_check_and_advance_wb_err(file, old);
 738		spin_unlock(&file->f_lock);
 739	}
 740
 741	/*
 742	 * We're mostly using this function as a drop in replacement for
 743	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
 744	 * that the legacy code would have had on these flags.
 745	 */
 746	clear_bit(AS_EIO, &mapping->flags);
 747	clear_bit(AS_ENOSPC, &mapping->flags);
 748	return err;
 749}
 750EXPORT_SYMBOL(file_check_and_advance_wb_err);
 751
 752/**
 753 * file_write_and_wait_range - write out & wait on a file range
 754 * @file:	file pointing to address_space with pages
 755 * @lstart:	offset in bytes where the range starts
 756 * @lend:	offset in bytes where the range ends (inclusive)
 757 *
 758 * Write out and wait upon file offsets lstart->lend, inclusive.
 759 *
 760 * Note that @lend is inclusive (describes the last byte to be written) so
 761 * that this function can be used to write to the very end-of-file (end = -1).
 762 *
 763 * After writing out and waiting on the data, we check and advance the
 764 * f_wb_err cursor to the latest value, and return any errors detected there.
 765 *
 766 * Return: %0 on success, negative error code otherwise.
 767 */
 768int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
 769{
 770	int err = 0, err2;
 771	struct address_space *mapping = file->f_mapping;
 772
 773	if (lend < lstart)
 774		return 0;
 775
 776	if (mapping_needs_writeback(mapping)) {
 777		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 778						 WB_SYNC_ALL);
 779		/* See comment of filemap_write_and_wait() */
 780		if (err != -EIO)
 781			__filemap_fdatawait_range(mapping, lstart, lend);
 782	}
 783	err2 = file_check_and_advance_wb_err(file);
 784	if (!err)
 785		err = err2;
 786	return err;
 787}
 788EXPORT_SYMBOL(file_write_and_wait_range);
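
/*
 * Example (illustrative ->fsync sketch, not part of this file): a simple
 * filesystem with no metadata of its own to flush can implement fsync
 * almost entirely with file_write_and_wait_range(), which also reports
 * writeback errors recorded since this file was last synced via the
 * f_wb_err cursor.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	return file_write_and_wait_range(file, start, end);
}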
 789
 790/**
 791 * replace_page_cache_folio - replace a pagecache folio with a new one
 792 * @old:	folio to be replaced
 793 * @new:	folio to replace with
 794 *
 795 * This function replaces a folio in the pagecache with a new one.  On
 796 * success it acquires the pagecache reference for the new folio and
 797 * drops it for the old folio.  Both the old and new folios must be
 798 * locked.  This function does not add the new folio to the LRU, the
 799 * caller must do that.
 800 *
 801 * The remove + add is atomic.  This function cannot fail.
 802 */
 803void replace_page_cache_folio(struct folio *old, struct folio *new)
 804{
 805	struct address_space *mapping = old->mapping;
 806	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
 807	pgoff_t offset = old->index;
 808	XA_STATE(xas, &mapping->i_pages, offset);
 809
 810	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
 811	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
 812	VM_BUG_ON_FOLIO(new->mapping, new);
 813
 814	folio_get(new);
 815	new->mapping = mapping;
 816	new->index = offset;
 817
 818	mem_cgroup_migrate(old, new);
 819
 820	xas_lock_irq(&xas);
 821	xas_store(&xas, new);
 822
 823	old->mapping = NULL;
 824	/* hugetlb pages do not participate in page cache accounting. */
 825	if (!folio_test_hugetlb(old))
 826		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
 827	if (!folio_test_hugetlb(new))
 828		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
 829	if (folio_test_swapbacked(old))
 830		__lruvec_stat_sub_folio(old, NR_SHMEM);
 831	if (folio_test_swapbacked(new))
 832		__lruvec_stat_add_folio(new, NR_SHMEM);
 833	xas_unlock_irq(&xas);
 834	if (free_folio)
 835		free_folio(old);
 836	folio_put(old);
 837}
 838EXPORT_SYMBOL_GPL(replace_page_cache_folio);
 839
 840noinline int __filemap_add_folio(struct address_space *mapping,
 841		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
 842{
 843	XA_STATE(xas, &mapping->i_pages, index);
 844	int huge = folio_test_hugetlb(folio);
 845	bool charged = false;
 846	long nr = 1;
 847
 848	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 849	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
 850	mapping_set_update(&xas, mapping);
 851
 852	if (!huge) {
 853		int error = mem_cgroup_charge(folio, NULL, gfp);
 854		VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 855		if (error)
 856			return error;
 857		charged = true;
 858		xas_set_order(&xas, index, folio_order(folio));
 859		nr = folio_nr_pages(folio);
 860	}
 861
 862	gfp &= GFP_RECLAIM_MASK;
 863	folio_ref_add(folio, nr);
 864	folio->mapping = mapping;
 865	folio->index = xas.xa_index;
 866
 867	do {
 868		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
 869		void *entry, *old = NULL;
 870
 871		if (order > folio_order(folio))
 872			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
 873					order, gfp);
 874		xas_lock_irq(&xas);
 875		xas_for_each_conflict(&xas, entry) {
 876			old = entry;
 877			if (!xa_is_value(entry)) {
 878				xas_set_err(&xas, -EEXIST);
 879				goto unlock;
 880			}
 881		}
 882
 883		if (old) {
 884			if (shadowp)
 885				*shadowp = old;
 886			/* entry may have been split before we acquired lock */
 887			order = xa_get_order(xas.xa, xas.xa_index);
 888			if (order > folio_order(folio)) {
 889				/* How to handle large swap entries? */
 890				BUG_ON(shmem_mapping(mapping));
 891				xas_split(&xas, old, order);
 892				xas_reset(&xas);
 893			}
 894		}
 895
 896		xas_store(&xas, folio);
 897		if (xas_error(&xas))
 898			goto unlock;
 899
 900		mapping->nrpages += nr;
 901
 902		/* hugetlb pages do not participate in page cache accounting */
 903		if (!huge) {
 904			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
 905			if (folio_test_pmd_mappable(folio))
 906				__lruvec_stat_mod_folio(folio,
 907						NR_FILE_THPS, nr);
 908		}
 909unlock:
 910		xas_unlock_irq(&xas);
 911	} while (xas_nomem(&xas, gfp));
 912
 913	if (xas_error(&xas))
 914		goto error;
 915
 916	trace_mm_filemap_add_to_page_cache(folio);
 917	return 0;
 918error:
 919	if (charged)
 920		mem_cgroup_uncharge(folio);
 921	folio->mapping = NULL;
 922	/* Leave page->index set: truncation relies upon it */
 923	folio_put_refs(folio, nr);
 924	return xas_error(&xas);
 925}
 926ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 927
 928int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 929				pgoff_t index, gfp_t gfp)
 930{
 931	void *shadow = NULL;
 932	int ret;
 933
 934	__folio_set_locked(folio);
 935	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
 936	if (unlikely(ret))
 937		__folio_clear_locked(folio);
 938	else {
 939		/*
 940		 * The folio might have been evicted from cache only
 941		 * recently, in which case it should be activated like
 942		 * any other repeatedly accessed folio.
 943		 * The exception is folios getting rewritten; evicting other
 944		 * data from the working set, only to cache data that will
 945		 * get overwritten with something else, is a waste of memory.
 946		 */
 947		WARN_ON_ONCE(folio_test_active(folio));
 948		if (!(gfp & __GFP_WRITE) && shadow)
 949			workingset_refault(folio, shadow);
 950		folio_add_lru(folio);
 951	}
 952	return ret;
 953}
 954EXPORT_SYMBOL_GPL(filemap_add_folio);
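
/*
 * Example (illustrative sketch, not part of this file): a read path that
 * instantiates a missing page cache folio.  filemap_alloc_folio() is
 * declared just below (NUMA-spread variant here; the generic wrapper
 * lives in pagemap.h), and filemap_add_folio() leaves the folio locked
 * on success so the caller can fill it and then unlock.
 */
static struct folio *example_create_folio(struct address_space *mapping,
					  pgoff_t index, gfp_t gfp)
{
	struct folio *folio;
	int err;

	folio = filemap_alloc_folio(gfp, 0);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return ERR_PTR(err);
	}
	/* Folio is locked and in the page cache; fill it, then unlock. */
	return folio;
}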
 955
 956#ifdef CONFIG_NUMA
 957struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
 958{
 959	int n;
 960	struct folio *folio;
 961
 962	if (cpuset_do_page_mem_spread()) {
 963		unsigned int cpuset_mems_cookie;
 964		do {
 965			cpuset_mems_cookie = read_mems_allowed_begin();
 966			n = cpuset_mem_spread_node();
 967			folio = __folio_alloc_node(gfp, order, n);
 968		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
 969
 970		return folio;
 971	}
 972	return folio_alloc(gfp, order);
 973}
 974EXPORT_SYMBOL(filemap_alloc_folio);
 975#endif
 976
 977/*
 978 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 979 *
 980 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 981 *
 982 * @mapping1: the first mapping to lock
 983 * @mapping2: the second mapping to lock
 984 */
 985void filemap_invalidate_lock_two(struct address_space *mapping1,
 986				 struct address_space *mapping2)
 987{
 988	if (mapping1 > mapping2)
 989		swap(mapping1, mapping2);
 990	if (mapping1)
 991		down_write(&mapping1->invalidate_lock);
 992	if (mapping2 && mapping1 != mapping2)
 993		down_write_nested(&mapping2->invalidate_lock, 1);
 994}
 995EXPORT_SYMBOL(filemap_invalidate_lock_two);
 996
 997/*
 998 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 999 *
1000 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1001 *
1002 * @mapping1: the first mapping to unlock
1003 * @mapping2: the second mapping to unlock
1004 */
1005void filemap_invalidate_unlock_two(struct address_space *mapping1,
1006				   struct address_space *mapping2)
1007{
1008	if (mapping1)
1009		up_write(&mapping1->invalidate_lock);
1010	if (mapping2 && mapping1 != mapping2)
1011		up_write(&mapping2->invalidate_lock);
1012}
1013EXPORT_SYMBOL(filemap_invalidate_unlock_two);
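
/*
 * Example (illustrative sketch, not part of this file): an operation that
 * invalidates page cache in two files at once (e.g. a cross-file
 * deduplication or exchange) takes both invalidate_locks via the helper
 * above, which orders them by address to avoid ABBA deadlocks, and then
 * drops them with the matching unlock helper.
 */
static void example_cross_file_op(struct inode *inode1, struct inode *inode2)
{
	filemap_invalidate_lock_two(inode1->i_mapping, inode2->i_mapping);

	/* ... invalidate and repopulate the page cache of both files ... */

	filemap_invalidate_unlock_two(inode1->i_mapping, inode2->i_mapping);
}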
1014
1015/*
1016 * In order to wait for pages to become available there must be
1017 * waitqueues associated with pages. By using a hash table of
1018 * waitqueues where the bucket discipline is to maintain all
1019 * waiters on the same queue and wake all when any of the pages
1020 * become available, and for the woken contexts to check to be
1021 * sure the appropriate page became available, this saves space
1022 * at a cost of "thundering herd" phenomena during rare hash
1023 * collisions.
1024 */
1025#define PAGE_WAIT_TABLE_BITS 8
1026#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1027static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1028
1029static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1030{
1031	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1032}
1033
1034void __init pagecache_init(void)
1035{
1036	int i;
1037
1038	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1039		init_waitqueue_head(&folio_wait_table[i]);
1040
1041	page_writeback_init();
1042}
1043
1044/*
1045 * The page wait code treats the "wait->flags" somewhat unusually, because
1046 * we have multiple different kinds of waits, not just the usual "exclusive"
1047 * one.
1048 *
1049 * We have:
1050 *
1051 *  (a) no special bits set:
1052 *
1053 *	We're just waiting for the bit to be released, and when a waker
1054 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1055 *	and remove it from the wait queue.
1056 *
1057 *	Simple and straightforward.
1058 *
1059 *  (b) WQ_FLAG_EXCLUSIVE:
1060 *
1061 *	The waiter is waiting to get the lock, and only one waiter should
1062 *	be woken up to avoid any thundering herd behavior. We'll set the
1063 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1064 *
1065 *	This is the traditional exclusive wait.
1066 *
1067 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1068 *
1069 *	The waiter is waiting to get the bit, and additionally wants the
1070 *	lock to be transferred to it for fair lock behavior. If the lock
1071 *	cannot be taken, we stop walking the wait queue without waking
1072 *	the waiter.
1073 *
1074 *	This is the "fair lock handoff" case, and in addition to setting
1075 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1076 *	that it now has the lock.
1077 */
1078static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1079{
1080	unsigned int flags;
1081	struct wait_page_key *key = arg;
1082	struct wait_page_queue *wait_page
1083		= container_of(wait, struct wait_page_queue, wait);
1084
1085	if (!wake_page_match(wait_page, key))
1086		return 0;
1087
1088	/*
1089	 * If it's a lock handoff wait, we get the bit for it, and
1090	 * stop walking (and do not wake it up) if we can't.
1091	 */
1092	flags = wait->flags;
1093	if (flags & WQ_FLAG_EXCLUSIVE) {
1094		if (test_bit(key->bit_nr, &key->folio->flags))
1095			return -1;
1096		if (flags & WQ_FLAG_CUSTOM) {
1097			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1098				return -1;
1099			flags |= WQ_FLAG_DONE;
1100		}
1101	}
1102
1103	/*
1104	 * We are holding the wait-queue lock, but the waiter that
1105	 * is waiting for this will be checking the flags without
1106	 * any locking.
1107	 *
1108	 * So update the flags atomically, and wake up the waiter
1109	 * afterwards to avoid any races. This store-release pairs
1110	 * with the load-acquire in folio_wait_bit_common().
1111	 */
1112	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1113	wake_up_state(wait->private, mode);
1114
1115	/*
1116	 * Ok, we have successfully done what we're waiting for,
1117	 * and we can unconditionally remove the wait entry.
1118	 *
1119	 * Note that this pairs with the "finish_wait()" in the
1120	 * waiter, and has to be the absolute last thing we do.
1121	 * After this list_del_init(&wait->entry) the wait entry
1122	 * might be de-allocated and the process might even have
1123	 * exited.
1124	 */
1125	list_del_init_careful(&wait->entry);
1126	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1127}
1128
1129static void folio_wake_bit(struct folio *folio, int bit_nr)
1130{
1131	wait_queue_head_t *q = folio_waitqueue(folio);
1132	struct wait_page_key key;
1133	unsigned long flags;
1134	wait_queue_entry_t bookmark;
1135
1136	key.folio = folio;
1137	key.bit_nr = bit_nr;
1138	key.page_match = 0;
1139
1140	bookmark.flags = 0;
1141	bookmark.private = NULL;
1142	bookmark.func = NULL;
1143	INIT_LIST_HEAD(&bookmark.entry);
1144
1145	spin_lock_irqsave(&q->lock, flags);
1146	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1147
1148	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1149		/*
1150		 * Take a breather from holding the lock,
1151		 * allow pages that finish wake up asynchronously
1152		 * to acquire the lock and remove themselves
1153		 * from wait queue
1154		 */
1155		spin_unlock_irqrestore(&q->lock, flags);
1156		cpu_relax();
1157		spin_lock_irqsave(&q->lock, flags);
1158		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1159	}
1160
1161	/*
1162	 * It's possible to miss clearing waiters here, when we woke our page
1163	 * waiters, but the hashed waitqueue has waiters for other pages on it.
1164	 * That's okay, it's a rare case. The next waker will clear it.
1165	 *
1166	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1167	 * other), the flag may be cleared in the course of freeing the page;
1168	 * but that is not required for correctness.
1169	 */
1170	if (!waitqueue_active(q) || !key.page_match)
1171		folio_clear_waiters(folio);
1172
1173	spin_unlock_irqrestore(&q->lock, flags);
1174}
1175
1176static void folio_wake(struct folio *folio, int bit)
1177{
1178	if (!folio_test_waiters(folio))
1179		return;
1180	folio_wake_bit(folio, bit);
1181}
1182
1183/*
1184 * A choice of three behaviors for folio_wait_bit_common():
1185 */
1186enum behavior {
1187	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
1188			 * __folio_lock() waiting on then setting PG_locked.
1189			 */
1190	SHARED,		/* Hold ref to page and check the bit when woken, like
1191			 * folio_wait_writeback() waiting on PG_writeback.
1192			 */
1193	DROP,		/* Drop ref to page before wait, no check when woken,
1194			 * like folio_put_wait_locked() on PG_locked.
1195			 */
1196};
1197
1198/*
1199 * Attempt to check (or get) the folio flag, and mark us done
1200 * if successful.
1201 */
1202static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1203					struct wait_queue_entry *wait)
1204{
1205	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1206		if (test_and_set_bit(bit_nr, &folio->flags))
1207			return false;
1208	} else if (test_bit(bit_nr, &folio->flags))
1209		return false;
1210
1211	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1212	return true;
1213}
1214
1215/* How many times do we accept lock stealing from under a waiter? */
1216int sysctl_page_lock_unfairness = 5;
1217
1218static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1219		int state, enum behavior behavior)
1220{
1221	wait_queue_head_t *q = folio_waitqueue(folio);
1222	int unfairness = sysctl_page_lock_unfairness;
1223	struct wait_page_queue wait_page;
1224	wait_queue_entry_t *wait = &wait_page.wait;
1225	bool thrashing = false;
1226	unsigned long pflags;
1227	bool in_thrashing;
1228
1229	if (bit_nr == PG_locked &&
1230	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1231		delayacct_thrashing_start(&in_thrashing);
1232		psi_memstall_enter(&pflags);
1233		thrashing = true;
1234	}
1235
1236	init_wait(wait);
1237	wait->func = wake_page_function;
1238	wait_page.folio = folio;
1239	wait_page.bit_nr = bit_nr;
1240
1241repeat:
1242	wait->flags = 0;
1243	if (behavior == EXCLUSIVE) {
1244		wait->flags = WQ_FLAG_EXCLUSIVE;
1245		if (--unfairness < 0)
1246			wait->flags |= WQ_FLAG_CUSTOM;
1247	}
1248
1249	/*
1250	 * Do one last check whether we can get the
1251	 * page bit synchronously.
1252	 *
1253	 * Do the folio_set_waiters() marking before that
1254	 * to let any waker we _just_ missed know they
1255	 * need to wake us up (otherwise they'll never
1256	 * even go to the slow case that looks at the
1257	 * page queue), and add ourselves to the wait
1258	 * queue if we need to sleep.
1259	 *
1260	 * This part needs to be done under the queue
1261	 * lock to avoid races.
1262	 */
1263	spin_lock_irq(&q->lock);
1264	folio_set_waiters(folio);
1265	if (!folio_trylock_flag(folio, bit_nr, wait))
1266		__add_wait_queue_entry_tail(q, wait);
1267	spin_unlock_irq(&q->lock);
1268
1269	/*
1270	 * From now on, all the logic will be based on
1271	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1272	 * see whether the page bit testing has already
1273	 * been done by the wake function.
1274	 *
1275	 * We can drop our reference to the folio.
1276	 */
1277	if (behavior == DROP)
1278		folio_put(folio);
1279
1280	/*
1281	 * Note that until the "finish_wait()", or until
1282	 * we see the WQ_FLAG_WOKEN flag, we need to
1283	 * be very careful with the 'wait->flags', because
1284	 * we may race with a waker that sets them.
1285	 */
1286	for (;;) {
1287		unsigned int flags;
1288
1289		set_current_state(state);
1290
1291		/* Loop until we've been woken or interrupted */
1292		flags = smp_load_acquire(&wait->flags);
1293		if (!(flags & WQ_FLAG_WOKEN)) {
1294			if (signal_pending_state(state, current))
1295				break;
1296
1297			io_schedule();
1298			continue;
1299		}
1300
1301		/* If we were non-exclusive, we're done */
1302		if (behavior != EXCLUSIVE)
1303			break;
1304
1305		/* If the waker got the lock for us, we're done */
1306		if (flags & WQ_FLAG_DONE)
1307			break;
1308
1309		/*
1310		 * Otherwise, if we're getting the lock, we need to
1311		 * try to get it ourselves.
1312		 *
1313		 * And if that fails, we'll have to retry this all.
1314		 */
1315		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1316			goto repeat;
1317
1318		wait->flags |= WQ_FLAG_DONE;
1319		break;
1320	}
1321
1322	/*
1323	 * If a signal happened, this 'finish_wait()' may remove the last
1324	 * waiter from the wait-queues, but the folio waiters bit will remain
1325	 * set. That's ok. The next wakeup will take care of it, and trying
1326	 * to do it here would be difficult and prone to races.
1327	 */
1328	finish_wait(q, wait);
1329
1330	if (thrashing) {
1331		delayacct_thrashing_end(&in_thrashing);
1332		psi_memstall_leave(&pflags);
1333	}
1334
1335	/*
1336	 * NOTE! The wait->flags weren't stable until we've done the
1337	 * 'finish_wait()', and we could have exited the loop above due
1338	 * to a signal, and had a wakeup event happen after the signal
1339	 * test but before the 'finish_wait()'.
1340	 *
1341	 * So only after the finish_wait() can we reliably determine
1342	 * if we got woken up or not, so we can now figure out the final
1343	 * return value based on that state without races.
1344	 *
1345	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1346	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1347	 */
1348	if (behavior == EXCLUSIVE)
1349		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1350
1351	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1352}
1353
1354#ifdef CONFIG_MIGRATION
1355/**
1356 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1357 * @entry: migration swap entry.
1358 * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
1359 *        for pte entries, pass NULL for pmd entries.
1360 * @ptl: already locked ptl. This function will drop the lock.
1361 *
1362 * Wait for a migration entry referencing the given page to be removed. This is
1363 * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1364 * this can be called without taking a reference on the page. Instead this
1365 * should be called while holding the ptl for the migration entry referencing
1366 * the page.
1367 *
1368 * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
1369 *
1370 * This follows the same logic as folio_wait_bit_common() so see the comments
1371 * there.
1372 */
1373void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
1374				spinlock_t *ptl)
1375{
1376	struct wait_page_queue wait_page;
1377	wait_queue_entry_t *wait = &wait_page.wait;
1378	bool thrashing = false;
1379	unsigned long pflags;
1380	bool in_thrashing;
1381	wait_queue_head_t *q;
1382	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1383
1384	q = folio_waitqueue(folio);
1385	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1386		delayacct_thrashing_start(&in_thrashing);
1387		psi_memstall_enter(&pflags);
1388		thrashing = true;
1389	}
1390
1391	init_wait(wait);
1392	wait->func = wake_page_function;
1393	wait_page.folio = folio;
1394	wait_page.bit_nr = PG_locked;
1395	wait->flags = 0;
1396
1397	spin_lock_irq(&q->lock);
1398	folio_set_waiters(folio);
1399	if (!folio_trylock_flag(folio, PG_locked, wait))
1400		__add_wait_queue_entry_tail(q, wait);
1401	spin_unlock_irq(&q->lock);
1402
1403	/*
1404	 * If a migration entry exists for the page the migration path must hold
1405	 * a valid reference to the page, and it must take the ptl to remove the
1406	 * migration entry. So the page is valid until the ptl is dropped.
1407	 */
1408	if (ptep)
1409		pte_unmap_unlock(ptep, ptl);
1410	else
1411		spin_unlock(ptl);
1412
1413	for (;;) {
1414		unsigned int flags;
1415
1416		set_current_state(TASK_UNINTERRUPTIBLE);
1417
1418		/* Loop until we've been woken or interrupted */
1419		flags = smp_load_acquire(&wait->flags);
1420		if (!(flags & WQ_FLAG_WOKEN)) {
1421			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1422				break;
1423
1424			io_schedule();
1425			continue;
1426		}
1427		break;
1428	}
1429
1430	finish_wait(q, wait);
1431
1432	if (thrashing) {
1433		delayacct_thrashing_end(&in_thrashing);
1434		psi_memstall_leave(&pflags);
1435	}
1436}
1437#endif
1438
1439void folio_wait_bit(struct folio *folio, int bit_nr)
1440{
1441	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1442}
1443EXPORT_SYMBOL(folio_wait_bit);
1444
1445int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1446{
1447	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1448}
1449EXPORT_SYMBOL(folio_wait_bit_killable);
1450
1451/**
1452 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1453 * @folio: The folio to wait for.
1454 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1455 *
1456 * The caller should hold a reference on @folio.  They expect the page to
1457 * become unlocked relatively soon, but do not wish to hold up migration
1458 * (for example) by holding the reference while waiting for the folio to
1459 * come unlocked.  After this function returns, the caller should not
1460 * dereference @folio.
1461 *
1462 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1463 */
1464static int folio_put_wait_locked(struct folio *folio, int state)
1465{
1466	return folio_wait_bit_common(folio, PG_locked, state, DROP);
1467}
1468
1469/**
1470 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1471 * @folio: Folio defining the wait queue of interest
1472 * @waiter: Waiter to add to the queue
1473 *
1474 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1475 */
1476void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1477{
1478	wait_queue_head_t *q = folio_waitqueue(folio);
1479	unsigned long flags;
1480
1481	spin_lock_irqsave(&q->lock, flags);
1482	__add_wait_queue_entry_tail(q, waiter);
1483	folio_set_waiters(folio);
1484	spin_unlock_irqrestore(&q->lock, flags);
1485}
1486EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1487
1488#ifndef clear_bit_unlock_is_negative_byte
1489
1490/*
1491 * PG_waiters is the high bit in the same byte as PG_lock.
1492 *
1493 * On x86 (and on many other architectures), we can clear PG_lock and
1494 * test the sign bit at the same time. But if the architecture does
1495 * not support that special operation, we just do this all by hand
1496 * instead.
1497 *
1498 * The read of PG_waiters has to be after (or concurrently with) PG_locked
1499 * being cleared, but a memory barrier should be unnecessary since it is
1500 * in the same byte as PG_locked.
1501 */
1502static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1503{
1504	clear_bit_unlock(nr, mem);
1505	/* smp_mb__after_atomic(); */
1506	return test_bit(PG_waiters, mem);
1507}
1508
1509#endif
1510
1511/**
1512 * folio_unlock - Unlock a locked folio.
1513 * @folio: The folio.
1514 *
1515 * Unlocks the folio and wakes up any thread sleeping on the page lock.
1516 *
1517 * Context: May be called from interrupt or process context.  May not be
1518 * called from NMI context.
1519 */
1520void folio_unlock(struct folio *folio)
1521{
1522	/* Bit 7 allows x86 to check the byte's sign bit */
1523	BUILD_BUG_ON(PG_waiters != 7);
1524	BUILD_BUG_ON(PG_locked > 7);
1525	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1526	if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
1527		folio_wake_bit(folio, PG_locked);
1528}
1529EXPORT_SYMBOL(folio_unlock);
1530
1531/**
1532 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1533 * @folio: The folio.
1534 *
1535 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1536 * it.  The folio reference held for PG_private_2 being set is released.
1537 *
1538 * This is, for example, used when a netfs folio is being written to a local
1539 * disk cache, thereby allowing writes to the cache for the same folio to be
1540 * serialised.
1541 */
1542void folio_end_private_2(struct folio *folio)
1543{
1544	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1545	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1546	folio_wake_bit(folio, PG_private_2);
1547	folio_put(folio);
1548}
1549EXPORT_SYMBOL(folio_end_private_2);
1550
1551/**
1552 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1553 * @folio: The folio to wait on.
1554 *
1555 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1556 */
1557void folio_wait_private_2(struct folio *folio)
1558{
1559	while (folio_test_private_2(folio))
1560		folio_wait_bit(folio, PG_private_2);
1561}
1562EXPORT_SYMBOL(folio_wait_private_2);
1563
1564/**
1565 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1566 * @folio: The folio to wait on.
1567 *
1568 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1569 * fatal signal is received by the calling task.
1570 *
1571 * Return:
1572 * - 0 if successful.
1573 * - -EINTR if a fatal signal was encountered.
1574 */
1575int folio_wait_private_2_killable(struct folio *folio)
1576{
1577	int ret = 0;
1578
1579	while (folio_test_private_2(folio)) {
1580		ret = folio_wait_bit_killable(folio, PG_private_2);
1581		if (ret < 0)
1582			break;
1583	}
1584
1585	return ret;
1586}
1587EXPORT_SYMBOL(folio_wait_private_2_killable);
1588
1589/**
1590 * folio_end_writeback - End writeback against a folio.
1591 * @folio: The folio.
1592 */
1593void folio_end_writeback(struct folio *folio)
1594{
1595	/*
1596	 * folio_test_clear_reclaim() could be used here but it is an
1597	 * atomic operation and overkill in this particular case. Failing
1598	 * to shuffle a folio marked for immediate reclaim is too mild
1599	 * a gain to justify taking an atomic operation penalty at the
1600	 * end of every folio writeback.
1601	 */
1602	if (folio_test_reclaim(folio)) {
1603		folio_clear_reclaim(folio);
1604		folio_rotate_reclaimable(folio);
1605	}
1606
1607	/*
1608	 * Writeback does not hold a folio reference of its own, relying
1609	 * on truncation to wait for the clearing of PG_writeback.
1610	 * But here we must make sure that the folio is not freed and
1611	 * reused before the folio_wake().
1612	 */
1613	folio_get(folio);
1614	if (!__folio_end_writeback(folio))
1615		BUG();
1616
1617	smp_mb__after_atomic();
1618	folio_wake(folio, PG_writeback);
1619	acct_reclaim_writeback(folio);
1620	folio_put(folio);
1621}
1622EXPORT_SYMBOL(folio_end_writeback);
1623
1624/*
1625 * After completing I/O on a page, call this routine to update the page
1626 * flags appropriately
1627 */
1628void page_endio(struct page *page, bool is_write, int err)
1629{
1630	struct folio *folio = page_folio(page);
1631
1632	if (!is_write) {
1633		if (!err) {
1634			folio_mark_uptodate(folio);
1635		} else {
1636			folio_clear_uptodate(folio);
1637			folio_set_error(folio);
1638		}
1639		folio_unlock(folio);
1640	} else {
1641		if (err) {
1642			struct address_space *mapping;
1643
1644			folio_set_error(folio);
1645			mapping = folio_mapping(folio);
1646			if (mapping)
1647				mapping_set_error(mapping, err);
1648		}
1649		folio_end_writeback(folio);
1650	}
1651}
1652EXPORT_SYMBOL_GPL(page_endio);
1653
1654/**
1655 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1656 * @folio: The folio to lock
1657 */
1658void __folio_lock(struct folio *folio)
1659{
1660	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1661				EXCLUSIVE);
1662}
1663EXPORT_SYMBOL(__folio_lock);
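
/*
 * Example (illustrative sketch, not part of this file): the usual pairing.
 * The folio_lock() wrapper in pagemap.h tries folio_trylock() first and
 * falls back to __folio_lock() above to sleep; whoever holds PG_locked
 * must eventually call folio_unlock() to wake any waiters.
 */
static void example_with_folio_locked(struct folio *folio)
{
	folio_lock(folio);
	/* The folio cannot be truncated from its mapping while it is locked. */
	folio_unlock(folio);
}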
1664
1665int __folio_lock_killable(struct folio *folio)
1666{
1667	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1668					EXCLUSIVE);
1669}
1670EXPORT_SYMBOL_GPL(__folio_lock_killable);
1671
1672static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1673{
1674	struct wait_queue_head *q = folio_waitqueue(folio);
1675	int ret = 0;
1676
1677	wait->folio = folio;
1678	wait->bit_nr = PG_locked;
1679
1680	spin_lock_irq(&q->lock);
1681	__add_wait_queue_entry_tail(q, &wait->wait);
1682	folio_set_waiters(folio);
1683	ret = !folio_trylock(folio);
1684	/*
1685	 * If we were successful now, we know we're still on the
1686	 * waitqueue as we're still under the lock. This means it's
1687	 * safe to remove and return success, we know the callback
1688	 * isn't going to trigger.
1689	 */
1690	if (!ret)
1691		__remove_wait_queue(q, &wait->wait);
1692	else
1693		ret = -EIOCBQUEUED;
1694	spin_unlock_irq(&q->lock);
1695	return ret;
1696}
1697
1698/*
1699 * Return values:
1700 * true - folio is locked; mmap_lock is still held.
1701 * false - folio is not locked.
1702 *     mmap_lock has been released (mmap_read_unlock(), unless flags had both
1703 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1704 *     which case mmap_lock is still held.
1705 *
1706 * If neither ALLOW_RETRY nor KILLABLE is set, this will always return true
1707 * with the folio locked and the mmap_lock unperturbed.
1708 */
1709bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
1710			 unsigned int flags)
1711{
1712	if (fault_flag_allow_retry_first(flags)) {
1713		/*
1714		 * CAUTION! In this case, mmap_lock is not released
1715		 * even though we return false.
1716		 */
1717		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1718			return false;
1719
1720		mmap_read_unlock(mm);
1721		if (flags & FAULT_FLAG_KILLABLE)
1722			folio_wait_locked_killable(folio);
1723		else
1724			folio_wait_locked(folio);
1725		return false;
1726	}
1727	if (flags & FAULT_FLAG_KILLABLE) {
1728		bool ret;
1729
1730		ret = __folio_lock_killable(folio);
1731		if (ret) {
1732			mmap_read_unlock(mm);
1733			return false;
1734		}
1735	} else {
1736		__folio_lock(folio);
1737	}
1738
1739	return true;
1740}
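/*
 * Editorial sketch (not part of the original source): roughly how a fault
 * path consumes the return value above.  On false, the folio must not be
 * touched and (unless FAULT_FLAG_RETRY_NOWAIT was set) mmap_lock has
 * already been dropped, so the caller reports VM_FAULT_RETRY.
 */
static vm_fault_t __maybe_unused example_fault_lock_folio(struct folio *folio,
							   struct vm_fault *vmf)
{
	if (!__folio_lock_or_retry(folio, vmf->vma->vm_mm, vmf->flags)) {
		folio_put(folio);
		return VM_FAULT_RETRY;
	}
	/* Folio is locked and mmap_lock is still held at this point. */
	return 0;
}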
1741
1742/**
1743 * page_cache_next_miss() - Find the next gap in the page cache.
1744 * @mapping: Mapping.
1745 * @index: Index.
1746 * @max_scan: Maximum range to search.
1747 *
1748 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1749 * gap with the lowest index.
1750 *
1751 * This function may be called under the rcu_read_lock.  However, this will
1752 * not atomically search a snapshot of the cache at a single point in time.
1753 * For example, if a gap is created at index 5, then subsequently a gap is
1754 * created at index 10, page_cache_next_miss covering both indices may
1755 * return 10 if called under the rcu_read_lock.
1756 *
1757 * Return: The index of the gap if found, otherwise an index outside the
1758 * range specified (in which case 'return - index >= max_scan' will be true).
1759 * In the rare case of index wrap-around, 0 will be returned.
1760 */
1761pgoff_t page_cache_next_miss(struct address_space *mapping,
1762			     pgoff_t index, unsigned long max_scan)
1763{
1764	XA_STATE(xas, &mapping->i_pages, index);
1765
1766	while (max_scan--) {
1767		void *entry = xas_next(&xas);
1768		if (!entry || xa_is_value(entry))
1769			break;
1770		if (xas.xa_index == 0)
1771			break;
1772	}
1773
1774	return xas.xa_index;
1775}
1776EXPORT_SYMBOL(page_cache_next_miss);
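/*
 * Editorial example (not part of the original source): probing how much of
 * a window after @index is already cached, in the spirit of the readahead
 * code.  The 32-page window is an arbitrary value chosen for the example.
 */
static unsigned long __maybe_unused
example_cached_run_length(struct address_space *mapping, pgoff_t index)
{
	pgoff_t gap = page_cache_next_miss(mapping, index, 32);

	/* 'gap - index >= 32' means no gap was found inside the window. */
	return gap - index;
}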
1777
1778/**
1779 * page_cache_prev_miss() - Find the previous gap in the page cache.
1780 * @mapping: Mapping.
1781 * @index: Index.
1782 * @max_scan: Maximum range to search.
1783 *
1784 * Search the range [max(index - max_scan + 1, 0), index] for the
1785 * gap with the highest index.
1786 *
1787 * This function may be called under the rcu_read_lock.  However, this will
1788 * not atomically search a snapshot of the cache at a single point in time.
1789 * For example, if a gap is created at index 10, then subsequently a gap is
1790 * created at index 5, page_cache_prev_miss() covering both indices may
1791 * return 5 if called under the rcu_read_lock.
1792 *
1793 * Return: The index of the gap if found, otherwise an index outside the
1794 * range specified (in which case 'index - return >= max_scan' will be true).
1795 * In the rare case of wrap-around, ULONG_MAX will be returned.
1796 */
1797pgoff_t page_cache_prev_miss(struct address_space *mapping,
1798			     pgoff_t index, unsigned long max_scan)
1799{
1800	XA_STATE(xas, &mapping->i_pages, index);
1801
1802	while (max_scan--) {
1803		void *entry = xas_prev(&xas);
1804		if (!entry || xa_is_value(entry))
1805			break;
1806		if (xas.xa_index == ULONG_MAX)
1807			break;
1808	}
1809
1810	return xas.xa_index;
1811}
1812EXPORT_SYMBOL(page_cache_prev_miss);
1813
1814/*
1815 * Lockless page cache protocol:
1816 * On the lookup side:
1817 * 1. Load the folio from i_pages
1818 * 2. Increment the refcount if it's not zero
1819 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1820 *
1821 * On the removal side:
1822 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1823 * B. Remove the page from i_pages
1824 * C. Return the page to the page allocator
1825 *
1826 * This means that any page may have its reference count temporarily
1827 * increased by a speculative page cache (or fast GUP) lookup as it can
1828 * be allocated by another user before the RCU grace period expires.
1829 * Because the refcount temporarily acquired here may end up being the
1830 * last refcount on the page, any page allocation must be freeable by
1831 * folio_put().
1832 */
1833
1834/*
1835 * mapping_get_entry - Get a page cache entry.
1836 * @mapping: the address_space to search
1837 * @index: The page cache index.
1838 *
1839 * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1840 * it is returned with an increased refcount.  If it is a shadow entry
1841 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1842 * it is returned without further action.
1843 *
1844 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1845 */
1846static void *mapping_get_entry(struct address_space *mapping, pgoff_t index)
1847{
1848	XA_STATE(xas, &mapping->i_pages, index);
1849	struct folio *folio;
1850
1851	rcu_read_lock();
1852repeat:
1853	xas_reset(&xas);
1854	folio = xas_load(&xas);
1855	if (xas_retry(&xas, folio))
1856		goto repeat;
1857	/*
1858	 * A shadow entry of a recently evicted page, or a swap entry from
1859	 * shmem/tmpfs.  Return it without attempting to raise page count.
1860	 */
1861	if (!folio || xa_is_value(folio))
1862		goto out;
1863
1864	if (!folio_try_get_rcu(folio))
1865		goto repeat;
1866
1867	if (unlikely(folio != xas_reload(&xas))) {
1868		folio_put(folio);
1869		goto repeat;
1870	}
1871out:
1872	rcu_read_unlock();
1873
1874	return folio;
1875}
1876
1877/**
1878 * __filemap_get_folio - Find and get a reference to a folio.
1879 * @mapping: The address_space to search.
1880 * @index: The page index.
1881 * @fgp_flags: %FGP flags modify how the folio is returned.
1882 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1883 *
1884 * Looks up the page cache entry at @mapping & @index.
1885 *
1886 * @fgp_flags can be zero or more of these flags:
1887 *
1888 * * %FGP_ACCESSED - The folio will be marked accessed.
1889 * * %FGP_LOCK - The folio is returned locked.
1890 * * %FGP_ENTRY - If there is a shadow / swap / DAX entry, return it
1891 *   instead of allocating a new folio to replace it.
1892 * * %FGP_CREAT - If no page is present then a new page is allocated using
1893 *   @gfp and added to the page cache and the VM's LRU list.
1894 *   The page is returned locked and with an increased refcount.
1895 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1896 *   page is already in cache.  If the page was allocated, unlock it before
1897 *   returning so the caller can do the same dance.
1898 * * %FGP_WRITE - The page will be written to by the caller.
1899 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
1900 * * %FGP_NOWAIT - Don't get blocked by page lock.
1901 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
1902 *
1903 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1904 * if the %GFP flags specified for %FGP_CREAT are atomic.
1905 *
1906 * If there is a page cache page, it is returned with an increased refcount.
1907 *
1908 * Return: The found folio or %NULL otherwise.
1909 */
1910struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1911		int fgp_flags, gfp_t gfp)
1912{
1913	struct folio *folio;
1914
1915repeat:
1916	folio = mapping_get_entry(mapping, index);
1917	if (xa_is_value(folio)) {
1918		if (fgp_flags & FGP_ENTRY)
1919			return folio;
1920		folio = NULL;
1921	}
1922	if (!folio)
1923		goto no_page;
1924
1925	if (fgp_flags & FGP_LOCK) {
1926		if (fgp_flags & FGP_NOWAIT) {
1927			if (!folio_trylock(folio)) {
1928				folio_put(folio);
1929				return NULL;
1930			}
1931		} else {
1932			folio_lock(folio);
1933		}
1934
1935		/* Has the page been truncated? */
1936		if (unlikely(folio->mapping != mapping)) {
1937			folio_unlock(folio);
1938			folio_put(folio);
1939			goto repeat;
1940		}
1941		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1942	}
1943
1944	if (fgp_flags & FGP_ACCESSED)
1945		folio_mark_accessed(folio);
1946	else if (fgp_flags & FGP_WRITE) {
1947		/* Clear idle flag for buffer write */
1948		if (folio_test_idle(folio))
1949			folio_clear_idle(folio);
1950	}
1951
1952	if (fgp_flags & FGP_STABLE)
1953		folio_wait_stable(folio);
1954no_page:
1955	if (!folio && (fgp_flags & FGP_CREAT)) {
1956		int err;
1957		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1958			gfp |= __GFP_WRITE;
1959		if (fgp_flags & FGP_NOFS)
1960			gfp &= ~__GFP_FS;
1961		if (fgp_flags & FGP_NOWAIT) {
1962			gfp &= ~GFP_KERNEL;
1963			gfp |= GFP_NOWAIT | __GFP_NOWARN;
1964		}
1965
1966		folio = filemap_alloc_folio(gfp, 0);
1967		if (!folio)
1968			return NULL;
1969
1970		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1971			fgp_flags |= FGP_LOCK;
1972
1973		/* Init accessed so we can avoid an atomic mark_page_accessed() later */
1974		if (fgp_flags & FGP_ACCESSED)
1975			__folio_set_referenced(folio);
1976
1977		err = filemap_add_folio(mapping, folio, index, gfp);
1978		if (unlikely(err)) {
1979			folio_put(folio);
1980			folio = NULL;
1981			if (err == -EEXIST)
1982				goto repeat;
1983		}
1984
1985		/*
1986		 * filemap_add_folio locks the page, and for mmap
1987		 * we expect an unlocked page.
1988		 */
1989		if (folio && (fgp_flags & FGP_FOR_MMAP))
1990			folio_unlock(folio);
1991	}
1992
1993	return folio;
1994}
1995EXPORT_SYMBOL(__filemap_get_folio);
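/*
 * Editorial example (not part of the original source): a write-style lookup
 * combining several FGP flags.  FGP_LOCK|FGP_CREAT finds or allocates the
 * folio and returns it locked; FGP_WRITE clears the idle flag and FGP_STABLE
 * waits for writeback so the contents may be modified.  Returns NULL on
 * allocation failure.  The helper name is illustrative.
 */
static struct folio *__maybe_unused
example_grab_folio_for_write(struct address_space *mapping, pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_CREAT | FGP_WRITE | FGP_STABLE,
			mapping_gfp_mask(mapping));
}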
1996
1997static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1998		xa_mark_t mark)
1999{
2000	struct folio *folio;
2001
2002retry:
2003	if (mark == XA_PRESENT)
2004		folio = xas_find(xas, max);
2005	else
2006		folio = xas_find_marked(xas, max, mark);
2007
2008	if (xas_retry(xas, folio))
2009		goto retry;
2010	/*
2011	 * A shadow entry of a recently evicted page, a swap
2012	 * entry from shmem/tmpfs or a DAX entry.  Return it
2013	 * without attempting to raise page count.
2014	 */
2015	if (!folio || xa_is_value(folio))
2016		return folio;
2017
2018	if (!folio_try_get_rcu(folio))
2019		goto reset;
2020
2021	if (unlikely(folio != xas_reload(xas))) {
2022		folio_put(folio);
2023		goto reset;
2024	}
2025
2026	return folio;
2027reset:
2028	xas_reset(xas);
2029	goto retry;
2030}
2031
2032/**
2033 * find_get_entries - gang pagecache lookup
2034 * @mapping:	The address_space to search
2035 * @start:	The starting page cache index
2036 * @end:	The final page index (inclusive).
2037 * @fbatch:	Where the resulting entries are placed.
2038 * @indices:	The cache indices corresponding to the entries in @entries
2039 *
2040 * find_get_entries() will search for and return a batch of entries in
2041 * the mapping.  The entries are placed in @fbatch.  find_get_entries()
2042 * takes a reference on any actual folios it returns.
2043 *
2044 * The entries have ascending indexes.  The indices may not be consecutive
2045 * due to not-present entries or large folios.
2046 *
2047 * Any shadow entries of evicted folios, or swap entries from
2048 * shmem/tmpfs, are included in the returned array.
2049 *
2050 * Return: The number of entries which were found.
2051 */
2052unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2053		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2054{
2055	XA_STATE(xas, &mapping->i_pages, *start);
2056	struct folio *folio;
2057
2058	rcu_read_lock();
2059	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2060		indices[fbatch->nr] = xas.xa_index;
2061		if (!folio_batch_add(fbatch, folio))
2062			break;
2063	}
2064	rcu_read_unlock();
2065
2066	if (folio_batch_count(fbatch)) {
2067		unsigned long nr = 1;
2068		int idx = folio_batch_count(fbatch) - 1;
2069
2070		folio = fbatch->folios[idx];
2071		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2072			nr = folio_nr_pages(folio);
2073		*start = indices[idx] + nr;
2074	}
2075	return folio_batch_count(fbatch);
2076}
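/*
 * Editorial sketch (not part of the original source): the shape of a
 * truncate-style consumer (compare mm/truncate.c).  Value entries (shadow or
 * swap) are skipped; real folios are dropped with the batch.
 */
static void __maybe_unused
example_scan_entries(struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i;

	folio_batch_init(&fbatch);
	while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			if (xa_is_value(folio))
				continue;	/* shadow or swap entry */
			/* ... inspect the folio at index indices[i] ... */
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}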
2077
2078/**
2079 * find_lock_entries - Find a batch of pagecache entries.
2080 * @mapping:	The address_space to search.
2081 * @start:	The starting page cache index.
2082 * @end:	The final page index (inclusive).
2083 * @fbatch:	Where the resulting entries are placed.
2084 * @indices:	The cache indices of the entries in @fbatch.
2085 *
2086 * find_lock_entries() will return a batch of entries from @mapping.
2087 * Swap, shadow and DAX entries are included.  Folios are returned
2088 * locked and with an incremented refcount.  Folios which are locked
2089 * by somebody else or under writeback are skipped.  Folios which are
2090 * partially outside the range are not returned.
2091 *
2092 * The entries have ascending indexes.  The indices may not be consecutive
2093 * due to not-present entries, large folios, folios which could not be
2094 * locked or folios under writeback.
2095 *
2096 * Return: The number of entries which were found.
2097 */
2098unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2099		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2100{
2101	XA_STATE(xas, &mapping->i_pages, *start);
2102	struct folio *folio;
2103
2104	rcu_read_lock();
2105	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2106		if (!xa_is_value(folio)) {
2107			if (folio->index < *start)
2108				goto put;
2109			if (folio->index + folio_nr_pages(folio) - 1 > end)
2110				goto put;
2111			if (!folio_trylock(folio))
2112				goto put;
2113			if (folio->mapping != mapping ||
2114			    folio_test_writeback(folio))
2115				goto unlock;
2116			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2117					folio);
2118		}
2119		indices[fbatch->nr] = xas.xa_index;
2120		if (!folio_batch_add(fbatch, folio))
2121			break;
2122		continue;
2123unlock:
2124		folio_unlock(folio);
2125put:
2126		folio_put(folio);
2127	}
2128	rcu_read_unlock();
2129
2130	if (folio_batch_count(fbatch)) {
2131		unsigned long nr = 1;
2132		int idx = folio_batch_count(fbatch) - 1;
2133
2134		folio = fbatch->folios[idx];
2135		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2136			nr = folio_nr_pages(folio);
2137		*start = indices[idx] + nr;
2138	}
2139	return folio_batch_count(fbatch);
2140}
2141
2142/**
2143 * filemap_get_folios - Get a batch of folios
2144 * @mapping:	The address_space to search
2145 * @start:	The starting page index
2146 * @end:	The final page index (inclusive)
2147 * @fbatch:	The batch to fill.
2148 *
2149 * Search for and return a batch of folios in the mapping starting at
2150 * index @start and up to index @end (inclusive).  The folios are returned
2151 * in @fbatch with an elevated reference count.
2152 *
2153 * The first folio may start before @start; if it does, it will contain
2154 * @start.  The final folio may extend beyond @end; if it does, it will
2155 * contain @end.  The folios have ascending indices.  There may be gaps
2156 * between the folios if there are indices which have no folio in the
2157 * page cache.  If folios are added to or removed from the page cache
2158 * while this is running, they may or may not be found by this call.
2159 *
2160 * Return: The number of folios which were found.
2161 * We also update @start to index the next folio for the traversal.
2162 */
2163unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2164		pgoff_t end, struct folio_batch *fbatch)
2165{
2166	XA_STATE(xas, &mapping->i_pages, *start);
2167	struct folio *folio;
2168
2169	rcu_read_lock();
2170	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2171		/* Skip over shadow, swap and DAX entries */
2172		if (xa_is_value(folio))
2173			continue;
2174		if (!folio_batch_add(fbatch, folio)) {
2175			unsigned long nr = folio_nr_pages(folio);
2176
2177			if (folio_test_hugetlb(folio))
2178				nr = 1;
2179			*start = folio->index + nr;
2180			goto out;
2181		}
2182	}
2183
2184	/*
2185	 * We come here when there is no page beyond @end. We take care to not
2186	 * overflow the index @start as it confuses some of the callers. This
2187	 * breaks the iteration when there is a page at index -1 but that is
2188	 * already broken anyway.
2189	 */
2190	if (end == (pgoff_t)-1)
2191		*start = (pgoff_t)-1;
2192	else
2193		*start = end + 1;
2194out:
2195	rcu_read_unlock();
2196
2197	return folio_batch_count(fbatch);
2198}
2199EXPORT_SYMBOL(filemap_get_folios);
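/*
 * Editorial example (not part of the original source): walking every cached
 * folio in a range.  @start is advanced by filemap_get_folios() itself, so
 * the loop repeats until a batch comes back empty.
 */
static void __maybe_unused
example_walk_folios(struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_mark_accessed(fbatch.folios[i]);	/* example action */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}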
2200
2201static inline
2202bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
2203{
2204	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
2205		return false;
2206	if (index >= max)
2207		return false;
2208	return index < folio->index + folio_nr_pages(folio) - 1;
2209}
2210
2211/**
2212 * filemap_get_folios_contig - Get a batch of contiguous folios
2213 * @mapping:	The address_space to search
2214 * @start:	The starting page index
2215 * @end:	The final page index (inclusive)
2216 * @fbatch:	The batch to fill
2217 *
2218 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2219 * except the returned folios are guaranteed to be contiguous. This may
2220 * not return all contiguous folios if the batch gets filled up.
2221 *
2222 * Return: The number of folios found.
2223 * Also update @start to be positioned for traversal of the next folio.
2224 */
2225
2226unsigned filemap_get_folios_contig(struct address_space *mapping,
2227		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2228{
2229	XA_STATE(xas, &mapping->i_pages, *start);
2230	unsigned long nr;
2231	struct folio *folio;
2232
2233	rcu_read_lock();
2234
2235	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2236			folio = xas_next(&xas)) {
2237		if (xas_retry(&xas, folio))
2238			continue;
2239		/*
2240		 * If the entry has been swapped out, we can stop looking.
2241		 * No current caller is looking for DAX entries.
2242		 */
2243		if (xa_is_value(folio))
2244			goto update_start;
2245
2246		if (!folio_try_get_rcu(folio))
2247			goto retry;
2248
2249		if (unlikely(folio != xas_reload(&xas)))
2250			goto put_folio;
2251
2252		if (!folio_batch_add(fbatch, folio)) {
2253			nr = folio_nr_pages(folio);
2254
2255			if (folio_test_hugetlb(folio))
2256				nr = 1;
2257			*start = folio->index + nr;
2258			goto out;
2259		}
2260		continue;
2261put_folio:
2262		folio_put(folio);
2263
2264retry:
2265		xas_reset(&xas);
2266	}
2267
2268update_start:
2269	nr = folio_batch_count(fbatch);
2270
2271	if (nr) {
2272		folio = fbatch->folios[nr - 1];
2273		if (folio_test_hugetlb(folio))
2274			*start = folio->index + 1;
2275		else
2276			*start = folio->index + folio_nr_pages(folio);
2277	}
2278out:
2279	rcu_read_unlock();
2280	return folio_batch_count(fbatch);
2281}
2282EXPORT_SYMBOL(filemap_get_folios_contig);
2283
2284/**
2285 * find_get_pages_range_tag - Find and return head pages matching @tag.
2286 * @mapping:	the address_space to search
2287 * @index:	the starting page index
2288 * @end:	The final page index (inclusive)
2289 * @tag:	the tag index
2290 * @nr_pages:	the maximum number of pages
2291 * @pages:	where the resulting pages are placed
2292 *
2293 * Like find_get_pages_range(), except we only return head pages which are
2294 * tagged with @tag.  @index is updated to the index immediately after the
2295 * last page we return, ready for the next iteration.
2296 *
2297 * Return: the number of pages which were found.
2298 */
2299unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
2300			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
2301			struct page **pages)
2302{
2303	XA_STATE(xas, &mapping->i_pages, *index);
2304	struct folio *folio;
2305	unsigned ret = 0;
2306
2307	if (unlikely(!nr_pages))
2308		return 0;
2309
2310	rcu_read_lock();
2311	while ((folio = find_get_entry(&xas, end, tag))) {
2312		/*
2313		 * Shadow entries should never be tagged, but this iteration
2314		 * is lockless so there is a window for page reclaim to evict
2315		 * a page we saw tagged.  Skip over it.
2316		 */
2317		if (xa_is_value(folio))
2318			continue;
2319
2320		pages[ret] = &folio->page;
2321		if (++ret == nr_pages) {
2322			*index = folio->index + folio_nr_pages(folio);
2323			goto out;
2324		}
2325	}
2326
2327	/*
2328	 * We come here when we got to @end. We take care to not overflow the
2329	 * index @index as it confuses some of the callers. This breaks the
2330	 * iteration when there is a page at index -1 but that is already
2331	 * broken anyway.
2332	 */
2333	if (end == (pgoff_t)-1)
2334		*index = (pgoff_t)-1;
2335	else
2336		*index = end + 1;
2337out:
2338	rcu_read_unlock();
2339
2340	return ret;
2341}
2342EXPORT_SYMBOL(find_get_pages_range_tag);
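/*
 * Editorial sketch (not part of the original source): a write_cache_pages()
 * style walk over dirty pages by tag.  PAGEVEC_SIZE pages per batch matches
 * the conventional batch size; error handling is omitted for brevity.
 */
static void __maybe_unused
example_walk_dirty_pages(struct address_space *mapping, pgoff_t index,
			 pgoff_t end)
{
	struct page *pages[PAGEVEC_SIZE];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
					      PAGECACHE_TAG_DIRTY,
					      PAGEVEC_SIZE, pages))) {
		for (i = 0; i < nr; i++) {
			/* ... lock and write back pages[i] here ... */
			put_page(pages[i]);
		}
		cond_resched();
	}
}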
2343
2344/*
2345 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2346 * a _large_ part of the i/o request. Imagine the worst scenario:
2347 *
2348 *      ---R__________________________________________B__________
2349 *         ^ reading here                             ^ bad block(assume 4k)
2350 *
2351 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2352 * => failing the whole request => read(R) => read(R+1) =>
2353 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2354 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2355 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2356 *
2357 * It is going insane. Fix it by quickly scaling down the readahead size.
2358 */
2359static void shrink_readahead_size_eio(struct file_ra_state *ra)
2360{
2361	ra->ra_pages /= 4;
2362}
2363
2364/*
2365 * filemap_get_read_batch - Get a batch of folios for read
2366 *
2367 * Get a batch of folios which represent a contiguous range of bytes in
2368 * the file.  No exceptional entries will be returned.  If @index is in
2369 * the middle of a folio, the entire folio will be returned.  The last
2370 * folio in the batch may have the readahead flag set or the uptodate flag
2371 * clear so that the caller can take the appropriate action.
2372 */
2373static void filemap_get_read_batch(struct address_space *mapping,
2374		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2375{
2376	XA_STATE(xas, &mapping->i_pages, index);
2377	struct folio *folio;
2378
2379	rcu_read_lock();
2380	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2381		if (xas_retry(&xas, folio))
2382			continue;
2383		if (xas.xa_index > max || xa_is_value(folio))
2384			break;
2385		if (xa_is_sibling(folio))
2386			break;
2387		if (!folio_try_get_rcu(folio))
2388			goto retry;
2389
2390		if (unlikely(folio != xas_reload(&xas)))
2391			goto put_folio;
2392
2393		if (!folio_batch_add(fbatch, folio))
2394			break;
2395		if (!folio_test_uptodate(folio))
2396			break;
2397		if (folio_test_readahead(folio))
2398			break;
2399		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
2400		continue;
2401put_folio:
2402		folio_put(folio);
2403retry:
2404		xas_reset(&xas);
2405	}
2406	rcu_read_unlock();
2407}
2408
2409static int filemap_read_folio(struct file *file, filler_t filler,
2410		struct folio *folio)
2411{
2412	bool workingset = folio_test_workingset(folio);
2413	unsigned long pflags;
2414	int error;
2415
2416	/*
2417	 * A previous I/O error may have been due to temporary failures,
2418	 * eg. multipath errors.  PG_error will be set again if read_folio
2419	 * fails.
2420	 */
2421	folio_clear_error(folio);
2422
2423	/* Start the actual read. The read will unlock the page. */
2424	if (unlikely(workingset))
2425		psi_memstall_enter(&pflags);
2426	error = filler(file, folio);
2427	if (unlikely(workingset))
2428		psi_memstall_leave(&pflags);
2429	if (error)
2430		return error;
2431
2432	error = folio_wait_locked_killable(folio);
2433	if (error)
2434		return error;
2435	if (folio_test_uptodate(folio))
2436		return 0;
2437	if (file)
2438		shrink_readahead_size_eio(&file->f_ra);
2439	return -EIO;
2440}
2441
2442static bool filemap_range_uptodate(struct address_space *mapping,
2443		loff_t pos, struct iov_iter *iter, struct folio *folio)
2444{
2445	int count;
2446
2447	if (folio_test_uptodate(folio))
2448		return true;
2449	/* pipes can't handle partially uptodate pages */
2450	if (iov_iter_is_pipe(iter))
2451		return false;
2452	if (!mapping->a_ops->is_partially_uptodate)
2453		return false;
2454	if (mapping->host->i_blkbits >= folio_shift(folio))
2455		return false;
2456
2457	count = iter->count;
2458	if (folio_pos(folio) > pos) {
2459		count -= folio_pos(folio) - pos;
2460		pos = 0;
2461	} else {
2462		pos -= folio_pos(folio);
2463	}
2464
2465	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2466}
2467
2468static int filemap_update_page(struct kiocb *iocb,
2469		struct address_space *mapping, struct iov_iter *iter,
2470		struct folio *folio)
2471{
2472	int error;
2473
2474	if (iocb->ki_flags & IOCB_NOWAIT) {
2475		if (!filemap_invalidate_trylock_shared(mapping))
2476			return -EAGAIN;
2477	} else {
2478		filemap_invalidate_lock_shared(mapping);
2479	}
2480
2481	if (!folio_trylock(folio)) {
2482		error = -EAGAIN;
2483		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2484			goto unlock_mapping;
2485		if (!(iocb->ki_flags & IOCB_WAITQ)) {
2486			filemap_invalidate_unlock_shared(mapping);
2487			/*
2488			 * This is where we usually end up waiting for a
2489			 * previously submitted readahead to finish.
2490			 */
2491			folio_put_wait_locked(folio, TASK_KILLABLE);
2492			return AOP_TRUNCATED_PAGE;
2493		}
2494		error = __folio_lock_async(folio, iocb->ki_waitq);
2495		if (error)
2496			goto unlock_mapping;
2497	}
2498
2499	error = AOP_TRUNCATED_PAGE;
2500	if (!folio->mapping)
2501		goto unlock;
2502
2503	error = 0;
2504	if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, folio))
2505		goto unlock;
2506
2507	error = -EAGAIN;
2508	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2509		goto unlock;
2510
2511	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2512			folio);
2513	goto unlock_mapping;
2514unlock:
2515	folio_unlock(folio);
2516unlock_mapping:
2517	filemap_invalidate_unlock_shared(mapping);
2518	if (error == AOP_TRUNCATED_PAGE)
2519		folio_put(folio);
2520	return error;
2521}
2522
2523static int filemap_create_folio(struct file *file,
2524		struct address_space *mapping, pgoff_t index,
2525		struct folio_batch *fbatch)
2526{
2527	struct folio *folio;
2528	int error;
2529
2530	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2531	if (!folio)
2532		return -ENOMEM;
2533
2534	/*
2535	 * Protect against truncate / hole punch. Grabbing invalidate_lock
2536	 * here assures we cannot instantiate and bring uptodate new
2537	 * pagecache folios after evicting page cache during truncate
2538	 * and before actually freeing blocks.	Note that we could
2539	 * release invalidate_lock after inserting the folio into
2540	 * the page cache as the locked folio would then be enough to
2541	 * synchronize with hole punching. But there are code paths
2542	 * such as filemap_update_page() filling in partially uptodate
2543	 * pages or ->readahead() that need to hold invalidate_lock
2544	 * while mapping blocks for IO so let's hold the lock here as
2545	 * well to keep locking rules simple.
2546	 */
2547	filemap_invalidate_lock_shared(mapping);
2548	error = filemap_add_folio(mapping, folio, index,
2549			mapping_gfp_constraint(mapping, GFP_KERNEL));
2550	if (error == -EEXIST)
2551		error = AOP_TRUNCATED_PAGE;
2552	if (error)
2553		goto error;
2554
2555	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2556	if (error)
2557		goto error;
2558
2559	filemap_invalidate_unlock_shared(mapping);
2560	folio_batch_add(fbatch, folio);
2561	return 0;
2562error:
2563	filemap_invalidate_unlock_shared(mapping);
2564	folio_put(folio);
2565	return error;
2566}
2567
2568static int filemap_readahead(struct kiocb *iocb, struct file *file,
2569		struct address_space *mapping, struct folio *folio,
2570		pgoff_t last_index)
2571{
2572	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2573
2574	if (iocb->ki_flags & IOCB_NOIO)
2575		return -EAGAIN;
2576	page_cache_async_ra(&ractl, folio, last_index - folio->index);
2577	return 0;
2578}
2579
2580static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
2581		struct folio_batch *fbatch)
2582{
2583	struct file *filp = iocb->ki_filp;
2584	struct address_space *mapping = filp->f_mapping;
2585	struct file_ra_state *ra = &filp->f_ra;
2586	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2587	pgoff_t last_index;
2588	struct folio *folio;
2589	int err = 0;
2590
2591	/* "last_index" is the index of the page beyond the end of the read */
2592	last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
2593retry:
2594	if (fatal_signal_pending(current))
2595		return -EINTR;
2596
2597	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2598	if (!folio_batch_count(fbatch)) {
2599		if (iocb->ki_flags & IOCB_NOIO)
2600			return -EAGAIN;
2601		page_cache_sync_readahead(mapping, ra, filp, index,
2602				last_index - index);
2603		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2604	}
2605	if (!folio_batch_count(fbatch)) {
2606		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2607			return -EAGAIN;
2608		err = filemap_create_folio(filp, mapping,
2609				iocb->ki_pos >> PAGE_SHIFT, fbatch);
2610		if (err == AOP_TRUNCATED_PAGE)
2611			goto retry;
2612		return err;
2613	}
2614
2615	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2616	if (folio_test_readahead(folio)) {
2617		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2618		if (err)
2619			goto err;
2620	}
2621	if (!folio_test_uptodate(folio)) {
2622		if ((iocb->ki_flags & IOCB_WAITQ) &&
2623		    folio_batch_count(fbatch) > 1)
2624			iocb->ki_flags |= IOCB_NOWAIT;
2625		err = filemap_update_page(iocb, mapping, iter, folio);
2626		if (err)
2627			goto err;
2628	}
2629
2630	return 0;
2631err:
2632	if (err < 0)
2633		folio_put(folio);
2634	if (likely(--fbatch->nr))
2635		return 0;
2636	if (err == AOP_TRUNCATED_PAGE)
2637		goto retry;
2638	return err;
2639}
2640
2641static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2642{
2643	unsigned int shift = folio_shift(folio);
2644
2645	return (pos1 >> shift == pos2 >> shift);
2646}
2647
2648/**
2649 * filemap_read - Read data from the page cache.
2650 * @iocb: The iocb to read.
2651 * @iter: Destination for the data.
2652 * @already_read: Number of bytes already read by the caller.
2653 *
2654 * Copies data from the page cache.  If the data is not currently present,
2655 * uses the readahead and read_folio address_space operations to fetch it.
2656 *
2657 * Return: Total number of bytes copied, including those already read by
2658 * the caller.  If an error happens before any bytes are copied, returns
2659 * a negative error number.
2660 */
2661ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2662		ssize_t already_read)
2663{
2664	struct file *filp = iocb->ki_filp;
2665	struct file_ra_state *ra = &filp->f_ra;
2666	struct address_space *mapping = filp->f_mapping;
2667	struct inode *inode = mapping->host;
2668	struct folio_batch fbatch;
2669	int i, error = 0;
2670	bool writably_mapped;
2671	loff_t isize, end_offset;
2672
2673	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2674		return 0;
2675	if (unlikely(!iov_iter_count(iter)))
2676		return 0;
2677
2678	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2679	folio_batch_init(&fbatch);
2680
2681	do {
2682		cond_resched();
2683
2684		/*
2685		 * If we've already successfully copied some data, then we
2686		 * can no longer safely return -EIOCBQUEUED. Hence mark
2687		 * an async read NOWAIT at that point.
2688		 */
2689		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2690			iocb->ki_flags |= IOCB_NOWAIT;
2691
2692		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2693			break;
2694
2695		error = filemap_get_pages(iocb, iter, &fbatch);
2696		if (error < 0)
2697			break;
2698
2699		/*
2700		 * i_size must be checked after we know the pages are Uptodate.
2701		 *
2702		 * Checking i_size after the uptodate check allows us to calculate
2703		 * the correct value for "nr", which means the zero-filled
2704		 * part of the page is not copied back to userspace (unless
2705		 * another truncate extends the file - this is desired though).
2706		 */
2707		isize = i_size_read(inode);
2708		if (unlikely(iocb->ki_pos >= isize))
2709			goto put_folios;
2710		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2711
2712		/*
2713		 * Once we start copying data, we don't want to be touching any
2714		 * cachelines that might be contended:
2715		 */
2716		writably_mapped = mapping_writably_mapped(mapping);
2717
2718		/*
2719		 * When a read accesses the same folio several times, only
2720		 * mark it as accessed the first time.
2721		 */
2722		if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
2723							fbatch.folios[0]))
2724			folio_mark_accessed(fbatch.folios[0]);
2725
2726		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2727			struct folio *folio = fbatch.folios[i];
2728			size_t fsize = folio_size(folio);
2729			size_t offset = iocb->ki_pos & (fsize - 1);
2730			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2731					     fsize - offset);
2732			size_t copied;
2733
2734			if (end_offset < folio_pos(folio))
2735				break;
2736			if (i > 0)
2737				folio_mark_accessed(folio);
2738			/*
2739			 * If users can be writing to this folio using arbitrary
2740			 * virtual addresses, take care of potential aliasing
2741			 * before reading the folio on the kernel side.
2742			 */
2743			if (writably_mapped)
2744				flush_dcache_folio(folio);
2745
2746			copied = copy_folio_to_iter(folio, offset, bytes, iter);
2747
2748			already_read += copied;
2749			iocb->ki_pos += copied;
2750			ra->prev_pos = iocb->ki_pos;
2751
2752			if (copied < bytes) {
2753				error = -EFAULT;
2754				break;
2755			}
2756		}
2757put_folios:
2758		for (i = 0; i < folio_batch_count(&fbatch); i++)
2759			folio_put(fbatch.folios[i]);
2760		folio_batch_init(&fbatch);
2761	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2762
2763	file_accessed(filp);
2764
2765	return already_read ? already_read : error;
2766}
2767EXPORT_SYMBOL_GPL(filemap_read);
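/*
 * Editorial sketch (not part of the original source): a filesystem
 * ->read_iter() that hands the buffered part of a read to filemap_read(),
 * forwarding any bytes a preceding fast path already copied.  The name and
 * the empty fast path are illustrative only.
 */
static ssize_t __maybe_unused
example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t already_read = 0;

	/* ... an optional O_DIRECT fast path could fill 'already_read' ... */
	return filemap_read(iocb, to, already_read);
}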
2768
2769/**
2770 * generic_file_read_iter - generic filesystem read routine
2771 * @iocb:	kernel I/O control block
2772 * @iter:	destination for the data read
2773 *
2774 * This is the "read_iter()" routine for all filesystems
2775 * that can use the page cache directly.
2776 *
2777 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2778 * be returned when no data can be read without waiting for I/O requests
2779 * to complete; it doesn't prevent readahead.
2780 *
2781 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2782 * requests shall be made for the read or for readahead.  When no data
2783 * can be read, -EAGAIN shall be returned.  When readahead would be
2784 * triggered, a partial, possibly empty read shall be returned.
2785 *
2786 * Return:
2787 * * number of bytes copied, even for partial reads
2788 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2789 */
2790ssize_t
2791generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2792{
2793	size_t count = iov_iter_count(iter);
2794	ssize_t retval = 0;
2795
2796	if (!count)
2797		return 0; /* skip atime */
2798
2799	if (iocb->ki_flags & IOCB_DIRECT) {
2800		struct file *file = iocb->ki_filp;
2801		struct address_space *mapping = file->f_mapping;
2802		struct inode *inode = mapping->host;
2803
2804		if (iocb->ki_flags & IOCB_NOWAIT) {
2805			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
2806						iocb->ki_pos + count - 1))
2807				return -EAGAIN;
2808		} else {
2809			retval = filemap_write_and_wait_range(mapping,
2810						iocb->ki_pos,
2811					        iocb->ki_pos + count - 1);
2812			if (retval < 0)
2813				return retval;
2814		}
2815
2816		file_accessed(file);
2817
2818		retval = mapping->a_ops->direct_IO(iocb, iter);
2819		if (retval >= 0) {
2820			iocb->ki_pos += retval;
2821			count -= retval;
2822		}
2823		if (retval != -EIOCBQUEUED)
2824			iov_iter_revert(iter, count - iov_iter_count(iter));
2825
2826		/*
2827		 * Btrfs can have a short DIO read if we encounter
2828		 * compressed extents, so if there was an error, or if
2829		 * we've already read everything we wanted to, or if
2830		 * there was a short read because we hit EOF, go ahead
2831		 * and return.  Otherwise fallthrough to buffered io for
2832		 * the rest of the read.  Buffered reads will not work for
2833		 * DAX files, so don't bother trying.
2834		 */
2835		if (retval < 0 || !count || IS_DAX(inode))
2836			return retval;
2837		if (iocb->ki_pos >= i_size_read(inode))
2838			return retval;
2839	}
2840
2841	return filemap_read(iocb, iter, retval);
2842}
2843EXPORT_SYMBOL(generic_file_read_iter);
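/*
 * Editorial example (not part of the original source): a simple filesystem
 * typically consumes this routine by pointing ->read_iter() at it in its
 * file_operations.  The structure below is illustrative only.
 */
static const struct file_operations example_file_ops __maybe_unused = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_mmap,
};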
2844
2845static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2846		struct address_space *mapping, struct folio *folio,
2847		loff_t start, loff_t end, bool seek_data)
2848{
2849	const struct address_space_operations *ops = mapping->a_ops;
2850	size_t offset, bsz = i_blocksize(mapping->host);
2851
2852	if (xa_is_value(folio) || folio_test_uptodate(folio))
2853		return seek_data ? start : end;
2854	if (!ops->is_partially_uptodate)
2855		return seek_data ? end : start;
2856
2857	xas_pause(xas);
2858	rcu_read_unlock();
2859	folio_lock(folio);
2860	if (unlikely(folio->mapping != mapping))
2861		goto unlock;
2862
2863	offset = offset_in_folio(folio, start) & ~(bsz - 1);
2864
2865	do {
2866		if (ops->is_partially_uptodate(folio, offset, bsz) ==
2867							seek_data)
2868			break;
2869		start = (start + bsz) & ~(bsz - 1);
2870		offset += bsz;
2871	} while (offset < folio_size(folio));
2872unlock:
2873	folio_unlock(folio);
2874	rcu_read_lock();
2875	return start;
2876}
2877
2878static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
2879{
2880	if (xa_is_value(folio))
2881		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
2882	return folio_size(folio);
2883}
2884
2885/**
2886 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
2887 * @mapping: Address space to search.
2888 * @start: First byte to consider.
2889 * @end: Limit of search (exclusive).
2890 * @whence: Either SEEK_HOLE or SEEK_DATA.
2891 *
2892 * If the page cache knows which blocks contain holes and which blocks
2893 * contain data, your filesystem can use this function to implement
2894 * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
2895 * entirely memory-based such as tmpfs, and filesystems which support
2896 * unwritten extents.
2897 *
2898 * Return: The requested offset on success, or -ENXIO if @whence specifies
2899 * SEEK_DATA and there is no data after @start.  There is an implicit hole
2900 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
2901 * and @end contain data.
2902 */
2903loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
2904		loff_t end, int whence)
2905{
2906	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
2907	pgoff_t max = (end - 1) >> PAGE_SHIFT;
2908	bool seek_data = (whence == SEEK_DATA);
2909	struct folio *folio;
2910
2911	if (end <= start)
2912		return -ENXIO;
2913
2914	rcu_read_lock();
2915	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
2916		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
2917		size_t seek_size;
2918
2919		if (start < pos) {
2920			if (!seek_data)
2921				goto unlock;
2922			start = pos;
2923		}
2924
2925		seek_size = seek_folio_size(&xas, folio);
2926		pos = round_up((u64)pos + 1, seek_size);
2927		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
2928				seek_data);
2929		if (start < pos)
2930			goto unlock;
2931		if (start >= end)
2932			break;
2933		if (seek_size > PAGE_SIZE)
2934			xas_set(&xas, pos >> PAGE_SHIFT);
2935		if (!xa_is_value(folio))
2936			folio_put(folio);
2937	}
2938	if (seek_data)
2939		start = -ENXIO;
2940unlock:
2941	rcu_read_unlock();
2942	if (folio && !xa_is_value(folio))
2943		folio_put(folio);
2944	if (start > end)
2945		return end;
2946	return start;
2947}
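/*
 * Editorial sketch (not part of the original source): an in-memory
 * filesystem implementing SEEK_HOLE/SEEK_DATA on top of this helper,
 * roughly as shmem does.  Only those two whence values make sense here;
 * error handling is trimmed for brevity.
 */
static loff_t __maybe_unused
example_llseek_hole_data(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t size = i_size_read(inode);

	if (offset >= size)
		return -ENXIO;
	offset = mapping_seek_hole_data(file->f_mapping, offset, size, whence);
	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	return offset;
}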
2948
2949#ifdef CONFIG_MMU
2950#define MMAP_LOTSAMISS  (100)
2951/*
2952 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
2953 * @vmf - the vm_fault for this fault.
2954 * @folio - the folio to lock.
2955 * @fpin - the pointer to the file we may pin (or is already pinned).
2956 *
2957 * This works similarly to __folio_lock_or_retry() in that it can drop the
2958 * mmap_lock.  It differs in that it actually returns the folio locked
2959 * if it returns 1 and 0 if it couldn't lock the folio.  If we did have
2960 * to drop the mmap_lock then fpin will point to the pinned file and
2961 * needs to be fput()'ed at a later point.
2962 */
2963static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
2964				     struct file **fpin)
2965{
2966	if (folio_trylock(folio))
2967		return 1;
2968
2969	/*
2970	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
2971	 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
2972	 * is supposed to work. We have way too many special cases..
2973	 */
2974	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2975		return 0;
2976
2977	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2978	if (vmf->flags & FAULT_FLAG_KILLABLE) {
2979		if (__folio_lock_killable(folio)) {
2980			/*
2981			 * We didn't have the right flags to drop the mmap_lock,
2982			 * but all fault_handlers only check for fatal signals
2983			 * if we return VM_FAULT_RETRY, so we need to drop the
2984			 * mmap_lock here and return 0 if we don't have a fpin.
2985			 */
2986			if (*fpin == NULL)
2987				mmap_read_unlock(vmf->vma->vm_mm);
2988			return 0;
2989		}
2990	} else
2991		__folio_lock(folio);
2992
2993	return 1;
2994}
2995
2996/*
2997 * Synchronous readahead happens when we don't even find a page in the page
2998 * cache at all.  We don't want to perform IO under the mmap sem, so if we have
2999 * to drop the mmap sem we return the file that was pinned in order for us to do
3000 * that.  If we didn't pin a file then we return NULL.  The file that is
3001 * returned needs to be fput()'ed when we're done with it.
3002 */
3003static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3004{
3005	struct file *file = vmf->vma->vm_file;
3006	struct file_ra_state *ra = &file->f_ra;
3007	struct address_space *mapping = file->f_mapping;
3008	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3009	struct file *fpin = NULL;
3010	unsigned long vm_flags = vmf->vma->vm_flags;
3011	unsigned int mmap_miss;
3012
3013#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3014	/* Use the readahead code, even if readahead is disabled */
3015	if (vm_flags & VM_HUGEPAGE) {
3016		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3017		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3018		ra->size = HPAGE_PMD_NR;
3019		/*
3020		 * Fetch two PMD folios, so we get the chance to actually
3021		 * readahead, unless we've been told not to.
3022		 */
3023		if (!(vm_flags & VM_RAND_READ))
3024			ra->size *= 2;
3025		ra->async_size = HPAGE_PMD_NR;
3026		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3027		return fpin;
3028	}
3029#endif
3030
3031	/* If we don't want any read-ahead, don't bother */
3032	if (vm_flags & VM_RAND_READ)
3033		return fpin;
3034	if (!ra->ra_pages)
3035		return fpin;
3036
3037	if (vm_flags & VM_SEQ_READ) {
3038		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3039		page_cache_sync_ra(&ractl, ra->ra_pages);
3040		return fpin;
3041	}
3042
3043	/* Avoid banging the cache line if not needed */
3044	mmap_miss = READ_ONCE(ra->mmap_miss);
3045	if (mmap_miss < MMAP_LOTSAMISS * 10)
3046		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3047
3048	/*
3049	 * Do we miss much more than hit in this file? If so,
3050	 * stop bothering with read-ahead. It will only hurt.
3051	 */
3052	if (mmap_miss > MMAP_LOTSAMISS)
3053		return fpin;
3054
3055	/*
3056	 * mmap read-around
3057	 */
3058	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3059	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3060	ra->size = ra->ra_pages;
3061	ra->async_size = ra->ra_pages / 4;
3062	ractl._index = ra->start;
3063	page_cache_ra_order(&ractl, ra, 0);
3064	return fpin;
3065}
3066
3067/*
3068 * Asynchronous readahead happens when we find the page and PG_readahead,
3069 * so we want to possibly extend the readahead further.  We return the file that
3070 * was pinned if we have to drop the mmap_lock in order to do IO.
3071 */
3072static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3073					    struct folio *folio)
3074{
3075	struct file *file = vmf->vma->vm_file;
3076	struct file_ra_state *ra = &file->f_ra;
3077	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3078	struct file *fpin = NULL;
3079	unsigned int mmap_miss;
3080
3081	/* If we don't want any read-ahead, don't bother */
3082	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3083		return fpin;
3084
3085	mmap_miss = READ_ONCE(ra->mmap_miss);
3086	if (mmap_miss)
3087		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3088
3089	if (folio_test_readahead(folio)) {
3090		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3091		page_cache_async_ra(&ractl, folio, ra->ra_pages);
3092	}
3093	return fpin;
3094}
3095
3096/**
3097 * filemap_fault - read in file data for page fault handling
3098 * @vmf:	struct vm_fault containing details of the fault
3099 *
3100 * filemap_fault() is invoked via the vma operations vector for a
3101 * mapped memory region to read in file data during a page fault.
3102 *
3103 * The goto's are kind of ugly, but this streamlines the normal case of having
3104 * it in the page cache, and handles the special cases reasonably without
3105 * having a lot of duplicated code.
3106 *
3107 * vma->vm_mm->mmap_lock must be held on entry.
3108 *
3109 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3110 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3111 *
3112 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3113 * has not been released.
3114 *
3115 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3116 *
3117 * Return: bitwise-OR of %VM_FAULT_ codes.
3118 */
3119vm_fault_t filemap_fault(struct vm_fault *vmf)
3120{
3121	int error;
3122	struct file *file = vmf->vma->vm_file;
3123	struct file *fpin = NULL;
3124	struct address_space *mapping = file->f_mapping;
3125	struct inode *inode = mapping->host;
3126	pgoff_t max_idx, index = vmf->pgoff;
3127	struct folio *folio;
3128	vm_fault_t ret = 0;
3129	bool mapping_locked = false;
3130
3131	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3132	if (unlikely(index >= max_idx))
3133		return VM_FAULT_SIGBUS;
3134
3135	/*
3136	 * Do we have something in the page cache already?
3137	 */
3138	folio = filemap_get_folio(mapping, index);
3139	if (likely(folio)) {
3140		/*
3141		 * We found the page, so try async readahead before waiting for
3142		 * the lock.
3143		 */
3144		if (!(vmf->flags & FAULT_FLAG_TRIED))
3145			fpin = do_async_mmap_readahead(vmf, folio);
3146		if (unlikely(!folio_test_uptodate(folio))) {
3147			filemap_invalidate_lock_shared(mapping);
3148			mapping_locked = true;
3149		}
3150	} else {
3151		/* No page in the page cache at all */
3152		count_vm_event(PGMAJFAULT);
3153		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3154		ret = VM_FAULT_MAJOR;
3155		fpin = do_sync_mmap_readahead(vmf);
3156retry_find:
3157		/*
3158		 * See comment in filemap_create_folio() why we need
3159		 * invalidate_lock
3160		 */
3161		if (!mapping_locked) {
3162			filemap_invalidate_lock_shared(mapping);
3163			mapping_locked = true;
3164		}
3165		folio = __filemap_get_folio(mapping, index,
3166					  FGP_CREAT|FGP_FOR_MMAP,
3167					  vmf->gfp_mask);
3168		if (!folio) {
3169			if (fpin)
3170				goto out_retry;
3171			filemap_invalidate_unlock_shared(mapping);
3172			return VM_FAULT_OOM;
3173		}
3174	}
3175
3176	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3177		goto out_retry;
3178
3179	/* Did it get truncated? */
3180	if (unlikely(folio->mapping != mapping)) {
3181		folio_unlock(folio);
3182		folio_put(folio);
3183		goto retry_find;
3184	}
3185	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3186
3187	/*
3188	 * We have a locked page in the page cache, now we need to check
3189	 * that it's up-to-date. If not, it is going to be due to an error.
3190	 */
3191	if (unlikely(!folio_test_uptodate(folio))) {
3192		/*
3193		 * The page was in cache and uptodate and now it is not.
3194		 * Strange but possible since we didn't hold the page lock all
3195		 * the time. Let's drop everything, get the invalidate lock and
3196		 * try again.
3197		 */
3198		if (!mapping_locked) {
3199			folio_unlock(folio);
3200			folio_put(folio);
3201			goto retry_find;
3202		}
3203		goto page_not_uptodate;
3204	}
3205
3206	/*
3207	 * We've made it this far and we had to drop our mmap_lock, now is the
3208	 * time to return to the upper layer and have it re-find the vma and
3209	 * redo the fault.
3210	 */
3211	if (fpin) {
3212		folio_unlock(folio);
3213		goto out_retry;
3214	}
3215	if (mapping_locked)
3216		filemap_invalidate_unlock_shared(mapping);
3217
3218	/*
3219	 * Found the page and have a reference on it.
3220	 * We must recheck i_size under page lock.
3221	 */
3222	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3223	if (unlikely(index >= max_idx)) {
3224		folio_unlock(folio);
3225		folio_put(folio);
3226		return VM_FAULT_SIGBUS;
3227	}
3228
3229	vmf->page = folio_file_page(folio, index);
3230	return ret | VM_FAULT_LOCKED;
3231
3232page_not_uptodate:
3233	/*
3234	 * Umm, take care of errors if the page isn't up-to-date.
3235	 * Try to re-read it _once_. We do this synchronously,
3236	 * because there really aren't any performance issues here
3237	 * and we need to check for errors.
3238	 */
3239	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3240	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3241	if (fpin)
3242		goto out_retry;
3243	folio_put(folio);
3244
3245	if (!error || error == AOP_TRUNCATED_PAGE)
3246		goto retry_find;
3247	filemap_invalidate_unlock_shared(mapping);
3248
3249	return VM_FAULT_SIGBUS;
3250
3251out_retry:
3252	/*
3253	 * We dropped the mmap_lock, we need to return to the fault handler to
3254	 * re-find the vma and come back and find our hopefully still populated
3255	 * page.
3256	 */
3257	if (folio)
3258		folio_put(folio);
3259	if (mapping_locked)
3260		filemap_invalidate_unlock_shared(mapping);
3261	if (fpin)
3262		fput(fpin);
3263	return ret | VM_FAULT_RETRY;
3264}
3265EXPORT_SYMBOL(filemap_fault);
3266
3267static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
3268{
3269	struct mm_struct *mm = vmf->vma->vm_mm;
3270
3271	/* Huge page is mapped? No need to proceed. */
3272	if (pmd_trans_huge(*vmf->pmd)) {
3273		unlock_page(page);
3274		put_page(page);
3275		return true;
3276	}
3277
3278	if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
3279		vm_fault_t ret = do_set_pmd(vmf, page);
3280		if (!ret) {
3281			/* The page is mapped successfully, reference consumed. */
3282			unlock_page(page);
3283			return true;
3284		}
3285	}
3286
3287	if (pmd_none(*vmf->pmd))
3288		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3289
3290	/* See comment in handle_pte_fault() */
3291	if (pmd_devmap_trans_unstable(vmf->pmd)) {
3292		unlock_page(page);
3293		put_page(page);
3294		return true;
3295	}
3296
3297	return false;
3298}
3299
3300static struct folio *next_uptodate_page(struct folio *folio,
3301				       struct address_space *mapping,
3302				       struct xa_state *xas, pgoff_t end_pgoff)
3303{
3304	unsigned long max_idx;
3305
3306	do {
3307		if (!folio)
3308			return NULL;
3309		if (xas_retry(xas, folio))
3310			continue;
3311		if (xa_is_value(folio))
3312			continue;
3313		if (folio_test_locked(folio))
3314			continue;
3315		if (!folio_try_get_rcu(folio))
3316			continue;
3317		/* Has the page moved or been split? */
3318		if (unlikely(folio != xas_reload(xas)))
3319			goto skip;
3320		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3321			goto skip;
3322		if (!folio_trylock(folio))
3323			goto skip;
3324		if (folio->mapping != mapping)
3325			goto unlock;
3326		if (!folio_test_uptodate(folio))
3327			goto unlock;
3328		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3329		if (xas->xa_index >= max_idx)
3330			goto unlock;
3331		return folio;
3332unlock:
3333		folio_unlock(folio);
3334skip:
3335		folio_put(folio);
3336	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3337
3338	return NULL;
3339}
3340
3341static inline struct folio *first_map_page(struct address_space *mapping,
3342					  struct xa_state *xas,
3343					  pgoff_t end_pgoff)
3344{
3345	return next_uptodate_page(xas_find(xas, end_pgoff),
3346				  mapping, xas, end_pgoff);
3347}
3348
3349static inline struct folio *next_map_page(struct address_space *mapping,
3350					 struct xa_state *xas,
3351					 pgoff_t end_pgoff)
3352{
3353	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3354				  mapping, xas, end_pgoff);
3355}
3356
3357vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3358			     pgoff_t start_pgoff, pgoff_t end_pgoff)
3359{
3360	struct vm_area_struct *vma = vmf->vma;
3361	struct file *file = vma->vm_file;
3362	struct address_space *mapping = file->f_mapping;
3363	pgoff_t last_pgoff = start_pgoff;
3364	unsigned long addr;
3365	XA_STATE(xas, &mapping->i_pages, start_pgoff);
3366	struct folio *folio;
3367	struct page *page;
3368	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3369	vm_fault_t ret = 0;
3370
3371	rcu_read_lock();
3372	folio = first_map_page(mapping, &xas, end_pgoff);
3373	if (!folio)
3374		goto out;
3375
3376	if (filemap_map_pmd(vmf, &folio->page)) {
3377		ret = VM_FAULT_NOPAGE;
3378		goto out;
3379	}
3380
3381	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3382	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3383	do {
3384again:
3385		page = folio_file_page(folio, xas.xa_index);
3386		if (PageHWPoison(page))
3387			goto unlock;
3388
3389		if (mmap_miss > 0)
3390			mmap_miss--;
3391
3392		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3393		vmf->pte += xas.xa_index - last_pgoff;
3394		last_pgoff = xas.xa_index;
3395
3396		/*
3397		 * NOTE: If there're PTE markers, we'll leave them to be
3398		 * handled in the specific fault path, and it'll prohibit the
3399		 * fault-around logic.
3400		 */
3401		if (!pte_none(*vmf->pte))
3402			goto unlock;
3403
3404		/* We're about to handle the fault */
3405		if (vmf->address == addr)
3406			ret = VM_FAULT_NOPAGE;
3407
3408		do_set_pte(vmf, page, addr);
3409		/* no need to invalidate: a not-present page won't be cached */
3410		update_mmu_cache(vma, addr, vmf->pte);
3411		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3412			xas.xa_index++;
3413			folio_ref_inc(folio);
3414			goto again;
3415		}
3416		folio_unlock(folio);
3417		continue;
3418unlock:
3419		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3420			xas.xa_index++;
3421			goto again;
3422		}
3423		folio_unlock(folio);
3424		folio_put(folio);
3425	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3426	pte_unmap_unlock(vmf->pte, vmf->ptl);
3427out:
3428	rcu_read_unlock();
3429	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3430	return ret;
3431}
3432EXPORT_SYMBOL(filemap_map_pages);
3433
3434vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3435{
3436	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3437	struct folio *folio = page_folio(vmf->page);
3438	vm_fault_t ret = VM_FAULT_LOCKED;
3439
3440	sb_start_pagefault(mapping->host->i_sb);
3441	file_update_time(vmf->vma->vm_file);
3442	folio_lock(folio);
3443	if (folio->mapping != mapping) {
3444		folio_unlock(folio);
3445		ret = VM_FAULT_NOPAGE;
3446		goto out;
3447	}
3448	/*
3449	 * We mark the folio dirty already here so that when freeze is in
3450	 * progress, we are guaranteed that writeback during freezing will
3451	 * see the dirty folio and writeprotect it again.
3452	 */
3453	folio_mark_dirty(folio);
3454	folio_wait_stable(folio);
3455out:
3456	sb_end_pagefault(mapping->host->i_sb);
3457	return ret;
3458}
3459
3460const struct vm_operations_struct generic_file_vm_ops = {
3461	.fault		= filemap_fault,
3462	.map_pages	= filemap_map_pages,
3463	.page_mkwrite	= filemap_page_mkwrite,
3464};
3465
3466/* This is used for a general mmap of a disk file */
3467
3468int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3469{
3470	struct address_space *mapping = file->f_mapping;
3471
3472	if (!mapping->a_ops->read_folio)
3473		return -ENOEXEC;
3474	file_accessed(file);
3475	vma->vm_ops = &generic_file_vm_ops;
3476	return 0;
3477}
3478
3479/*
3480 * This is for filesystems which do not implement ->writepage.
3481 */
3482int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3483{
3484	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3485		return -EINVAL;
3486	return generic_file_mmap(file, vma);
3487}
3488#else
3489vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3490{
3491	return VM_FAULT_SIGBUS;
3492}
3493int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3494{
3495	return -ENOSYS;
3496}
3497int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3498{
3499	return -ENOSYS;
3500}
3501#endif /* CONFIG_MMU */
3502
3503EXPORT_SYMBOL(filemap_page_mkwrite);
3504EXPORT_SYMBOL(generic_file_mmap);
3505EXPORT_SYMBOL(generic_file_readonly_mmap);
3506
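/*
 * Illustrative sketch (hypothetical "examplefs", not part of this file): a
 * filesystem that relies on the generic page cache typically wires the
 * helpers exported above straight into its file_operations:
 *
 *	static const struct file_operations examplefs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.fsync		= generic_file_fsync,
 *	};
 *
 * generic_file_mmap() installs generic_file_vm_ops, so page faults on such
 * a mapping are then served by filemap_fault() and filemap_map_pages().
 */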
3507static struct folio *do_read_cache_folio(struct address_space *mapping,
3508		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3509{
3510	struct folio *folio;
3511	int err;
3512
3513	if (!filler)
3514		filler = mapping->a_ops->read_folio;
3515repeat:
3516	folio = filemap_get_folio(mapping, index);
3517	if (!folio) {
3518		folio = filemap_alloc_folio(gfp, 0);
3519		if (!folio)
3520			return ERR_PTR(-ENOMEM);
3521		err = filemap_add_folio(mapping, folio, index, gfp);
3522		if (unlikely(err)) {
3523			folio_put(folio);
3524			if (err == -EEXIST)
3525				goto repeat;
3526			/* Presumably ENOMEM for xarray node */
3527			return ERR_PTR(err);
3528		}
3529
3530		goto filler;
3531	}
3532	if (folio_test_uptodate(folio))
3533		goto out;
3534
3535	if (!folio_trylock(folio)) {
3536		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3537		goto repeat;
3538	}
3539
3540	/* Folio was truncated from mapping */
3541	if (!folio->mapping) {
3542		folio_unlock(folio);
3543		folio_put(folio);
3544		goto repeat;
3545	}
3546
3547	/* Someone else locked and filled the page in a very small window */
3548	if (folio_test_uptodate(folio)) {
3549		folio_unlock(folio);
3550		goto out;
3551	}
3552
3553filler:
3554	err = filemap_read_folio(file, filler, folio);
3555	if (err) {
3556		folio_put(folio);
3557		if (err == AOP_TRUNCATED_PAGE)
3558			goto repeat;
3559		return ERR_PTR(err);
3560	}
3561
3562out:
3563	folio_mark_accessed(folio);
3564	return folio;
3565}
3566
3567/**
3568 * read_cache_folio - Read into page cache, fill it if needed.
3569 * @mapping: The address_space to read from.
3570 * @index: The index to read.
3571 * @filler: Function to perform the read, or NULL to use aops->read_folio().
3572 * @file: Passed to filler function, may be NULL if not required.
3573 *
3574 * Read one page into the page cache.  If it succeeds, the folio returned
3575 * will contain @index, but it may not be the first page of the folio.
3576 *
3577 * If the filler function returns an error, it will be returned to the
3578 * caller.
3579 *
3580 * Context: May sleep.  Expects mapping->invalidate_lock to be held.
3581 * Return: An uptodate folio on success, ERR_PTR() on failure.
3582 */
3583struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3584		filler_t filler, struct file *file)
3585{
3586	return do_read_cache_folio(mapping, index, filler, file,
3587			mapping_gfp_mask(mapping));
3588}
3589EXPORT_SYMBOL(read_cache_folio);
3590
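/*
 * Illustrative sketch (not part of this file): a typical caller passes a
 * NULL filler to use ->read_folio and must drop its reference when done,
 * holding mapping->invalidate_lock as documented above:
 *
 *	folio = read_cache_folio(mapping, index, NULL, file);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... use the uptodate folio ...
 *	folio_put(folio);
 */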
3591static struct page *do_read_cache_page(struct address_space *mapping,
3592		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3593{
3594	struct folio *folio;
3595
3596	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3597	if (IS_ERR(folio))
3598		return &folio->page;
3599	return folio_file_page(folio, index);
3600}
3601
3602struct page *read_cache_page(struct address_space *mapping,
3603			pgoff_t index, filler_t *filler, struct file *file)
3604{
3605	return do_read_cache_page(mapping, index, filler, file,
3606			mapping_gfp_mask(mapping));
3607}
3608EXPORT_SYMBOL(read_cache_page);
3609
3610/**
3611 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3612 * @mapping:	the page's address_space
3613 * @index:	the page index
3614 * @gfp:	the page allocator flags to use if allocating
3615 *
3616 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3617 * any new page allocations done using the specified allocation flags.
3618 *
3619 * If the page does not get brought uptodate, return -EIO.
3620 *
3621 * The function expects mapping->invalidate_lock to be already held.
3622 *
3623 * Return: up to date page on success, ERR_PTR() on failure.
3624 */
3625struct page *read_cache_page_gfp(struct address_space *mapping,
3626				pgoff_t index,
3627				gfp_t gfp)
3628{
3629	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3630}
3631EXPORT_SYMBOL(read_cache_page_gfp);
3632
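/*
 * Illustrative sketch (not part of this file): a caller that must not
 * recurse into the filesystem during allocation might restrict the mask,
 * e.g. with GFP_NOFS (a hypothetical choice, not mandated here):
 *
 *	page = read_cache_page_gfp(mapping, index, GFP_NOFS);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */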
3633/*
3634 * Warn about a page cache invalidation failure during a direct I/O write.
3635 */
3636void dio_warn_stale_pagecache(struct file *filp)
3637{
3638	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3639	char pathname[128];
3640	char *path;
3641
3642	errseq_set(&filp->f_mapping->wb_err, -EIO);
3643	if (__ratelimit(&_rs)) {
3644		path = file_path(filp, pathname, sizeof(pathname));
3645		if (IS_ERR(path))
3646			path = "(unknown)";
3647		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3648		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3649			current->comm);
3650	}
3651}
3652
3653ssize_t
3654generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3655{
3656	struct file	*file = iocb->ki_filp;
3657	struct address_space *mapping = file->f_mapping;
3658	struct inode	*inode = mapping->host;
3659	loff_t		pos = iocb->ki_pos;
3660	ssize_t		written;
3661	size_t		write_len;
3662	pgoff_t		end;
3663
3664	write_len = iov_iter_count(from);
3665	end = (pos + write_len - 1) >> PAGE_SHIFT;
3666
3667	if (iocb->ki_flags & IOCB_NOWAIT) {
3668		/* If there are pages to writeback, return */
3669		if (filemap_range_has_page(file->f_mapping, pos,
3670					   pos + write_len - 1))
3671			return -EAGAIN;
3672	} else {
3673		written = filemap_write_and_wait_range(mapping, pos,
3674							pos + write_len - 1);
3675		if (written)
3676			goto out;
3677	}
3678
3679	/*
3680	 * After a write we want buffered reads to be sure to go to disk to get
3681	 * the new data.  We invalidate clean cached pages from the region we're
3682	 * about to write.  We do this *before* the write so that we can return
3683	 * without clobbering -EIOCBQUEUED from ->direct_IO().
3684	 */
3685	written = invalidate_inode_pages2_range(mapping,
3686					pos >> PAGE_SHIFT, end);
3687	/*
3688	 * If a page cannot be invalidated, return 0 to fall back
3689	 * to buffered write.
3690	 */
3691	if (written) {
3692		if (written == -EBUSY)
3693			return 0;
3694		goto out;
3695	}
3696
3697	written = mapping->a_ops->direct_IO(iocb, from);
3698
3699	/*
3700	 * Finally, try again to invalidate clean pages which might have been
3701	 * cached by non-direct readahead, or faulted in by get_user_pages()
3702	 * if the source of the write was an mmap'ed region of the file
3703	 * we're writing.  Either one is a pretty crazy thing to do,
3704	 * so we don't support it 100%.  If this invalidation
3705	 * fails, tough, the write still worked...
3706	 *
3707	 * Most of the time we do not need this since dio_complete() will do
3708	 * the invalidation for us. However there are some file systems that
3709	 * do not end up with dio_complete() being called, so let's not break
3710	 * them by removing it completely.
3711	 *
3712	 * A notable example is blkdev_direct_IO().
3713	 *
3714	 * Skip invalidation for async writes or if mapping has no pages.
3715	 */
3716	if (written > 0 && mapping->nrpages &&
3717	    invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3718		dio_warn_stale_pagecache(file);
3719
3720	if (written > 0) {
3721		pos += written;
3722		write_len -= written;
3723		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3724			i_size_write(inode, pos);
3725			mark_inode_dirty(inode);
3726		}
3727		iocb->ki_pos = pos;
3728	}
3729	if (written != -EIOCBQUEUED)
3730		iov_iter_revert(from, write_len - iov_iter_count(from));
3731out:
3732	return written;
3733}
3734EXPORT_SYMBOL(generic_file_direct_write);
3735
3736ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3737{
3738	struct file *file = iocb->ki_filp;
3739	loff_t pos = iocb->ki_pos;
3740	struct address_space *mapping = file->f_mapping;
3741	const struct address_space_operations *a_ops = mapping->a_ops;
3742	long status = 0;
3743	ssize_t written = 0;
3744
3745	do {
3746		struct page *page;
3747		unsigned long offset;	/* Offset into pagecache page */
3748		unsigned long bytes;	/* Bytes to write to page */
3749		size_t copied;		/* Bytes copied from user */
3750		void *fsdata = NULL;
3751
3752		offset = (pos & (PAGE_SIZE - 1));
3753		bytes = min_t(unsigned long, PAGE_SIZE - offset,
3754						iov_iter_count(i));
3755
3756again:
3757		/*
3758		 * Bring in the user page that we will copy from _first_.
3759		 * Otherwise there's a nasty deadlock on copying from the
3760		 * same page as we're writing to, without it being marked
3761		 * up-to-date.
3762		 */
3763		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3764			status = -EFAULT;
3765			break;
3766		}
3767
3768		if (fatal_signal_pending(current)) {
3769			status = -EINTR;
3770			break;
3771		}
3772
3773		status = a_ops->write_begin(file, mapping, pos, bytes,
3774						&page, &fsdata);
3775		if (unlikely(status < 0))
3776			break;
3777
3778		if (mapping_writably_mapped(mapping))
3779			flush_dcache_page(page);
3780
3781		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3782		flush_dcache_page(page);
3783
3784		status = a_ops->write_end(file, mapping, pos, bytes, copied,
3785						page, fsdata);
3786		if (unlikely(status != copied)) {
3787			iov_iter_revert(i, copied - max(status, 0L));
3788			if (unlikely(status < 0))
3789				break;
3790		}
3791		cond_resched();
3792
3793		if (unlikely(status == 0)) {
3794			/*
3795			 * A short copy made ->write_end() reject the
3796			 * thing entirely.  Might be memory poisoning
3797			 * halfway through, might be a race with munmap,
3798			 * might be severe memory pressure.
3799			 */
3800			if (copied)
3801				bytes = copied;
3802			goto again;
3803		}
3804		pos += status;
3805		written += status;
3806
3807		balance_dirty_pages_ratelimited(mapping);
3808	} while (iov_iter_count(i));
3809
3810	return written ? written : status;
3811}
3812EXPORT_SYMBOL(generic_perform_write);
3813
3814/**
3815 * __generic_file_write_iter - write data to a file
3816 * @iocb:	IO state structure (file, offset, etc.)
3817 * @from:	iov_iter with data to write
3818 *
3819 * This function does all the work needed for actually writing data to a
3820 * file. It does all basic checks, removes SUID from the file, updates
3821 * modification times and calls proper subroutines depending on whether we
3822 * do direct IO or a standard buffered write.
3823 *
3824 * It expects i_rwsem to be grabbed unless we work on a block device or similar
3825 * object which does not need locking at all.
3826 *
3827 * This function does *not* take care of syncing data in case of O_SYNC write.
3828 * A caller has to handle it. This is mainly due to the fact that we want to
3829 * avoid syncing under i_rwsem.
3830 *
3831 * Return:
3832 * * number of bytes written, even for truncated writes
3833 * * negative error code if no data has been written at all
3834 */
3835ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3836{
3837	struct file *file = iocb->ki_filp;
3838	struct address_space *mapping = file->f_mapping;
3839	struct inode 	*inode = mapping->host;
3840	ssize_t		written = 0;
3841	ssize_t		err;
3842	ssize_t		status;
3843
3844	/* We can write back this queue in page reclaim */
3845	current->backing_dev_info = inode_to_bdi(inode);
3846	err = file_remove_privs(file);
3847	if (err)
3848		goto out;
3849
3850	err = file_update_time(file);
3851	if (err)
3852		goto out;
3853
3854	if (iocb->ki_flags & IOCB_DIRECT) {
3855		loff_t pos, endbyte;
3856
3857		written = generic_file_direct_write(iocb, from);
3858		/*
3859		 * If the write stopped short of completing, fall back to
3860		 * buffered writes.  Some filesystems do this for writes to
3861		 * holes, for example.  For DAX files, a buffered write will
3862		 * not succeed (even if it did, DAX does not handle dirty
3863		 * page-cache pages correctly).
3864		 */
3865		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
3866			goto out;
3867
3868		pos = iocb->ki_pos;
3869		status = generic_perform_write(iocb, from);
3870		/*
3871		 * If generic_perform_write() returned a synchronous error
3872		 * then we want to return the number of bytes which were
3873		 * direct-written, or the error code if that was zero.  Note
3874		 * that this differs from normal direct-io semantics, which
3875		 * will return -EFOO even if some bytes were written.
3876		 */
3877		if (unlikely(status < 0)) {
3878			err = status;
3879			goto out;
3880		}
3881		/*
3882		 * We need to ensure that the page cache pages are written to
3883		 * disk and invalidated to preserve the expected O_DIRECT
3884		 * semantics.
3885		 */
3886		endbyte = pos + status - 1;
3887		err = filemap_write_and_wait_range(mapping, pos, endbyte);
3888		if (err == 0) {
3889			iocb->ki_pos = endbyte + 1;
3890			written += status;
3891			invalidate_mapping_pages(mapping,
3892						 pos >> PAGE_SHIFT,
3893						 endbyte >> PAGE_SHIFT);
3894		} else {
3895			/*
3896			 * We don't know how much we wrote, so just return
3897			 * the number of bytes which were direct-written
3898			 */
3899		}
3900	} else {
3901		written = generic_perform_write(iocb, from);
3902		if (likely(written > 0))
3903			iocb->ki_pos += written;
3904	}
3905out:
3906	current->backing_dev_info = NULL;
3907	return written ? written : err;
3908}
3909EXPORT_SYMBOL(__generic_file_write_iter);
3910
3911/**
3912 * generic_file_write_iter - write data to a file
3913 * @iocb:	IO state structure
3914 * @from:	iov_iter with data to write
3915 *
3916 * This is a wrapper around __generic_file_write_iter() to be used by most
3917 * filesystems. It takes care of syncing the file in case of O_SYNC file
3918 * and acquires i_rwsem as needed.
3919 * Return:
3920 * * negative error code if no data has been written at all or
3921 *   vfs_fsync_range() failed for a synchronous write
3922 * * number of bytes written, even for truncated writes
3923 */
3924ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3925{
3926	struct file *file = iocb->ki_filp;
3927	struct inode *inode = file->f_mapping->host;
3928	ssize_t ret;
3929
3930	inode_lock(inode);
3931	ret = generic_write_checks(iocb, from);
3932	if (ret > 0)
3933		ret = __generic_file_write_iter(iocb, from);
3934	inode_unlock(inode);
3935
3936	if (ret > 0)
3937		ret = generic_write_sync(iocb, ret);
3938	return ret;
3939}
3940EXPORT_SYMBOL(generic_file_write_iter);
3941
3942/**
3943 * filemap_release_folio() - Release fs-specific metadata on a folio.
3944 * @folio: The folio which the kernel is trying to free.
3945 * @gfp: Memory allocation flags (and I/O mode).
3946 *
3947 * The address_space is trying to release any data attached to a folio
3948 * (presumably at folio->private).
3949 *
3950 * This will also be called if the private_2 flag is set on a page,
3951 * indicating that the folio has other metadata associated with it.
3952 *
3953 * The @gfp argument specifies whether I/O may be performed to release
3954 * this page (__GFP_IO), and whether the call may block
3955 * (__GFP_RECLAIM & __GFP_FS).
3956 *
3957 * Return: %true if the release was successful, otherwise %false.
3958 */
3959bool filemap_release_folio(struct folio *folio, gfp_t gfp)
3960{
3961	struct address_space * const mapping = folio->mapping;
3962
3963	BUG_ON(!folio_test_locked(folio));
3964	if (folio_test_writeback(folio))
3965		return false;
3966
3967	if (mapping && mapping->a_ops->release_folio)
3968		return mapping->a_ops->release_folio(folio, gfp);
3969	return try_to_free_buffers(folio);
3970}
3971EXPORT_SYMBOL(filemap_release_folio);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/mm/filemap.c
   4 *
   5 * Copyright (C) 1994-1999  Linus Torvalds
   6 */
   7
   8/*
   9 * This file handles the generic file mmap semantics used by
  10 * most "normal" filesystems (but you don't /have/ to use this:
  11 * the NFS filesystem used to do this differently, for example)
  12 */
  13#include <linux/export.h>
  14#include <linux/compiler.h>
  15#include <linux/dax.h>
  16#include <linux/fs.h>
  17#include <linux/sched/signal.h>
  18#include <linux/uaccess.h>
  19#include <linux/capability.h>
  20#include <linux/kernel_stat.h>
  21#include <linux/gfp.h>
  22#include <linux/mm.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/syscalls.h>
  26#include <linux/mman.h>
  27#include <linux/pagemap.h>
  28#include <linux/file.h>
  29#include <linux/uio.h>
  30#include <linux/error-injection.h>
  31#include <linux/hash.h>
  32#include <linux/writeback.h>
  33#include <linux/backing-dev.h>
  34#include <linux/pagevec.h>
  35#include <linux/security.h>
  36#include <linux/cpuset.h>
  37#include <linux/hugetlb.h>
  38#include <linux/memcontrol.h>
  39#include <linux/shmem_fs.h>
  40#include <linux/rmap.h>
  41#include <linux/delayacct.h>
  42#include <linux/psi.h>
  43#include <linux/ramfs.h>
  44#include <linux/page_idle.h>
  45#include <linux/migrate.h>
  46#include <linux/pipe_fs_i.h>
  47#include <linux/splice.h>
  48#include <linux/rcupdate_wait.h>
  49#include <linux/sched/mm.h>
  50#include <asm/pgalloc.h>
  51#include <asm/tlbflush.h>
  52#include "internal.h"
  53
  54#define CREATE_TRACE_POINTS
  55#include <trace/events/filemap.h>
  56
  57/*
  58 * FIXME: remove all knowledge of the buffer layer from the core VM
  59 */
  60#include <linux/buffer_head.h> /* for try_to_free_buffers */
  61
  62#include <asm/mman.h>
  63
  64#include "swap.h"
  65
  66/*
  67 * Shared mappings implemented 30.11.1994. It's not fully working yet,
  68 * though.
  69 *
  70 * Shared mappings now work. 15.8.1995  Bruno.
  71 *
  72 * finished 'unifying' the page and buffer cache and SMP-threaded the
  73 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  74 *
  75 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  76 */
  77
  78/*
  79 * Lock ordering:
  80 *
  81 *  ->i_mmap_rwsem		(truncate_pagecache)
  82 *    ->private_lock		(__free_pte->block_dirty_folio)
  83 *      ->swap_lock		(exclusive_swap_page, others)
  84 *        ->i_pages lock
  85 *
  86 *  ->i_rwsem
  87 *    ->invalidate_lock		(acquired by fs in truncate path)
  88 *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
  89 *
  90 *  ->mmap_lock
  91 *    ->i_mmap_rwsem
  92 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  93 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
  94 *
  95 *  ->mmap_lock
  96 *    ->invalidate_lock		(filemap_fault)
  97 *      ->lock_page		(filemap_fault, access_process_vm)
  98 *
  99 *  ->i_rwsem			(generic_perform_write)
 100 *    ->mmap_lock		(fault_in_readable->do_page_fault)
 101 *
 102 *  bdi->wb.list_lock
 103 *    sb_lock			(fs/fs-writeback.c)
 104 *    ->i_pages lock		(__sync_single_inode)
 105 *
 106 *  ->i_mmap_rwsem
 107 *    ->anon_vma.lock		(vma_merge)
 108 *
 109 *  ->anon_vma.lock
 110 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 111 *
 112 *  ->page_table_lock or pte_lock
 113 *    ->swap_lock		(try_to_unmap_one)
 114 *    ->private_lock		(try_to_unmap_one)
 115 *    ->i_pages lock		(try_to_unmap_one)
 116 *    ->lruvec->lru_lock	(follow_page_mask->mark_page_accessed)
 117 *    ->lruvec->lru_lock	(check_pte_range->folio_isolate_lru)
 118 *    ->private_lock		(folio_remove_rmap_pte->set_page_dirty)
 119 *    ->i_pages lock		(folio_remove_rmap_pte->set_page_dirty)
 120 *    bdi.wb->list_lock		(folio_remove_rmap_pte->set_page_dirty)
 121 *    ->inode->i_lock		(folio_remove_rmap_pte->set_page_dirty)
 122 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 123 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 124 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 125 */
 126
 127static void page_cache_delete(struct address_space *mapping,
 128				   struct folio *folio, void *shadow)
 129{
 130	XA_STATE(xas, &mapping->i_pages, folio->index);
 131	long nr = 1;
 132
 133	mapping_set_update(&xas, mapping);
 134
 135	xas_set_order(&xas, folio->index, folio_order(folio));
 136	nr = folio_nr_pages(folio);
 137
 138	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 139
 140	xas_store(&xas, shadow);
 141	xas_init_marks(&xas);
 142
 143	folio->mapping = NULL;
 144	/* Leave page->index set: truncation lookup relies upon it */
 145	mapping->nrpages -= nr;
 146}
 147
 148static void filemap_unaccount_folio(struct address_space *mapping,
 149		struct folio *folio)
 150{
 151	long nr;
 152
 153	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
 154	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
 155		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
 156			 current->comm, folio_pfn(folio));
 157		dump_page(&folio->page, "still mapped when deleted");
 158		dump_stack();
 159		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 160
 161		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
 162			int mapcount = folio_mapcount(folio);
 163
 164			if (folio_ref_count(folio) >= mapcount + 2) {
 165				/*
 166				 * All vmas have already been torn down, so it's
 167				 * a good bet that actually the page is unmapped
 168				 * and we'd rather not leak it: if we're wrong,
 169				 * another bad page check should catch it later.
 170				 */
 171				atomic_set(&folio->_mapcount, -1);
 172				folio_ref_sub(folio, mapcount);
 173			}
 174		}
 175	}
 176
 177	/* hugetlb folios do not participate in page cache accounting. */
 178	if (folio_test_hugetlb(folio))
 179		return;
 180
 181	nr = folio_nr_pages(folio);
 182
 183	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 184	if (folio_test_swapbacked(folio)) {
 185		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
 186		if (folio_test_pmd_mappable(folio))
 187			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
 188	} else if (folio_test_pmd_mappable(folio)) {
 189		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
 190		filemap_nr_thps_dec(mapping);
 191	}
 192
 193	/*
 194	 * At this point folio must be either written or cleaned by
 195	 * truncate.  Dirty folio here signals a bug and loss of
 196	 * unwritten data - on ordinary filesystems.
 197	 *
 198	 * But it's harmless on in-memory filesystems like tmpfs; and can
 199	 * occur when a driver which did get_user_pages() sets page dirty
 200	 * before putting it, while the inode is being finally evicted.
 201	 *
 202	 * Below fixes dirty accounting after removing the folio entirely
 203	 * but leaves the dirty flag set: it has no effect for truncated
 204	 * folio and anyway will be cleared before returning folio to
 205	 * buddy allocator.
 206	 */
 207	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
 208			 mapping_can_writeback(mapping)))
 209		folio_account_cleaned(folio, inode_to_wb(mapping->host));
 210}
 211
 212/*
 213 * Delete a page from the page cache and free it. Caller has to make
 214 * sure the page is locked and that nobody else uses it - or that usage
 215 * is safe.  The caller must hold the i_pages lock.
 216 */
 217void __filemap_remove_folio(struct folio *folio, void *shadow)
 218{
 219	struct address_space *mapping = folio->mapping;
 220
 221	trace_mm_filemap_delete_from_page_cache(folio);
 222	filemap_unaccount_folio(mapping, folio);
 223	page_cache_delete(mapping, folio, shadow);
 224}
 225
 226void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 227{
 228	void (*free_folio)(struct folio *);
 229	int refs = 1;
 230
 231	free_folio = mapping->a_ops->free_folio;
 232	if (free_folio)
 233		free_folio(folio);
 234
 235	if (folio_test_large(folio))
 236		refs = folio_nr_pages(folio);
 237	folio_put_refs(folio, refs);
 238}
 239
 240/**
 241 * filemap_remove_folio - Remove folio from page cache.
 242 * @folio: The folio.
 243 *
 244 * This must be called only on folios that are locked and have been
 245 * verified to be in the page cache.  It will never put the folio into
 246 * the free list because the caller has a reference on the page.
 247 */
 248void filemap_remove_folio(struct folio *folio)
 249{
 250	struct address_space *mapping = folio->mapping;
 251
 252	BUG_ON(!folio_test_locked(folio));
 253	spin_lock(&mapping->host->i_lock);
 254	xa_lock_irq(&mapping->i_pages);
 255	__filemap_remove_folio(folio, NULL);
 256	xa_unlock_irq(&mapping->i_pages);
 257	if (mapping_shrinkable(mapping))
 258		inode_add_lru(mapping->host);
 259	spin_unlock(&mapping->host->i_lock);
 260
 261	filemap_free_folio(mapping, folio);
 262}
 263
 264/*
 265 * page_cache_delete_batch - delete several folios from page cache
 266 * @mapping: the mapping to which folios belong
 267 * @fbatch: batch of folios to delete
 268 *
 269 * The function walks over mapping->i_pages and removes folios passed in
 270 * @fbatch from the mapping. The function expects @fbatch to be sorted
 271 * by page index and is optimised for it to be dense.
 272 * It tolerates holes in @fbatch (mapping entries at those indices are not
 273 * modified).
 274 *
 275 * The function expects the i_pages lock to be held.
 276 */
 277static void page_cache_delete_batch(struct address_space *mapping,
 278			     struct folio_batch *fbatch)
 279{
 280	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
 281	long total_pages = 0;
 282	int i = 0;
 283	struct folio *folio;
 284
 285	mapping_set_update(&xas, mapping);
 286	xas_for_each(&xas, folio, ULONG_MAX) {
 287		if (i >= folio_batch_count(fbatch))
 288			break;
 289
 290		/* A swap/dax/shadow entry got inserted? Skip it. */
 291		if (xa_is_value(folio))
 292			continue;
 293		/*
 294		 * A page got inserted in our range? Skip it. We have our
 295		 * pages locked so they are protected from being removed.
 296		 * If we see a page whose index is higher than ours, it
 297		 * means our page has been removed, which shouldn't be
 298		 * possible because we're holding the PageLock.
 299		 */
 300		if (folio != fbatch->folios[i]) {
 301			VM_BUG_ON_FOLIO(folio->index >
 302					fbatch->folios[i]->index, folio);
 303			continue;
 304		}
 305
 306		WARN_ON_ONCE(!folio_test_locked(folio));
 307
 308		folio->mapping = NULL;
 309		/* Leave folio->index set: truncation lookup relies on it */
 310
 311		i++;
 312		xas_store(&xas, NULL);
 313		total_pages += folio_nr_pages(folio);
 314	}
 315	mapping->nrpages -= total_pages;
 316}
 317
 318void delete_from_page_cache_batch(struct address_space *mapping,
 319				  struct folio_batch *fbatch)
 320{
 321	int i;
 322
 323	if (!folio_batch_count(fbatch))
 324		return;
 325
 326	spin_lock(&mapping->host->i_lock);
 327	xa_lock_irq(&mapping->i_pages);
 328	for (i = 0; i < folio_batch_count(fbatch); i++) {
 329		struct folio *folio = fbatch->folios[i];
 330
 331		trace_mm_filemap_delete_from_page_cache(folio);
 332		filemap_unaccount_folio(mapping, folio);
 333	}
 334	page_cache_delete_batch(mapping, fbatch);
 335	xa_unlock_irq(&mapping->i_pages);
 336	if (mapping_shrinkable(mapping))
 337		inode_add_lru(mapping->host);
 338	spin_unlock(&mapping->host->i_lock);
 339
 340	for (i = 0; i < folio_batch_count(fbatch); i++)
 341		filemap_free_folio(mapping, fbatch->folios[i]);
 342}
 343
 344int filemap_check_errors(struct address_space *mapping)
 345{
 346	int ret = 0;
 347	/* Check for outstanding write errors */
 348	if (test_bit(AS_ENOSPC, &mapping->flags) &&
 349	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 350		ret = -ENOSPC;
 351	if (test_bit(AS_EIO, &mapping->flags) &&
 352	    test_and_clear_bit(AS_EIO, &mapping->flags))
 353		ret = -EIO;
 354	return ret;
 355}
 356EXPORT_SYMBOL(filemap_check_errors);
 357
 358static int filemap_check_and_keep_errors(struct address_space *mapping)
 359{
 360	/* Check for outstanding write errors */
 361	if (test_bit(AS_EIO, &mapping->flags))
 362		return -EIO;
 363	if (test_bit(AS_ENOSPC, &mapping->flags))
 364		return -ENOSPC;
 365	return 0;
 366}
 367
 368/**
 369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 370 * @mapping:	address space structure to write
 371 * @wbc:	the writeback_control controlling the writeout
 372 *
 373 * Call writepages on the mapping using the provided wbc to control the
 374 * writeout.
 375 *
 376 * Return: %0 on success, negative error code otherwise.
 377 */
 378int filemap_fdatawrite_wbc(struct address_space *mapping,
 379			   struct writeback_control *wbc)
 380{
 381	int ret;
 382
 383	if (!mapping_can_writeback(mapping) ||
 384	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 385		return 0;
 386
 387	wbc_attach_fdatawrite_inode(wbc, mapping->host);
 388	ret = do_writepages(mapping, wbc);
 389	wbc_detach_inode(wbc);
 390	return ret;
 391}
 392EXPORT_SYMBOL(filemap_fdatawrite_wbc);
 393
 394/**
 395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 396 * @mapping:	address space structure to write
 397 * @start:	offset in bytes where the range starts
 398 * @end:	offset in bytes where the range ends (inclusive)
 399 * @sync_mode:	enable synchronous operation
 400 *
 401 * Start writeback against all of a mapping's dirty pages that lie
 402 * within the byte offsets <start, end> inclusive.
 403 *
 404 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 405 * opposed to a regular memory cleansing writeback.  The difference between
 406 * these two operations is that if a dirty page/buffer is encountered, it must
 407 * be waited upon, and not just skipped over.
 408 *
 409 * Return: %0 on success, negative error code otherwise.
 410 */
 411int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 412				loff_t end, int sync_mode)
 413{
 414	struct writeback_control wbc = {
 415		.sync_mode = sync_mode,
 416		.nr_to_write = LONG_MAX,
 417		.range_start = start,
 418		.range_end = end,
 419	};
 420
 421	return filemap_fdatawrite_wbc(mapping, &wbc);
 422}
 423
 424static inline int __filemap_fdatawrite(struct address_space *mapping,
 425	int sync_mode)
 426{
 427	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 428}
 429
 430int filemap_fdatawrite(struct address_space *mapping)
 431{
 432	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
 433}
 434EXPORT_SYMBOL(filemap_fdatawrite);
 435
 436int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 437				loff_t end)
 438{
 439	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 440}
 441EXPORT_SYMBOL(filemap_fdatawrite_range);
 442
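/*
 * Illustrative sketch (not part of this file): the write-out and the wait
 * can be split so that other work overlaps the I/O; both helpers take an
 * inclusive byte range:
 *
 *	err = filemap_fdatawrite_range(mapping, pos, pos + len - 1);
 *	... do unrelated work while writeback is in flight ...
 *	if (!err)
 *		err = filemap_fdatawait_range(mapping, pos, pos + len - 1);
 */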
 443/**
 444 * filemap_flush - mostly a non-blocking flush
 445 * @mapping:	target address_space
 446 *
 447 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 448 * purposes - I/O may not be started against all dirty pages.
 449 *
 450 * Return: %0 on success, negative error code otherwise.
 451 */
 452int filemap_flush(struct address_space *mapping)
 453{
 454	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 455}
 456EXPORT_SYMBOL(filemap_flush);
 457
 458/**
 459 * filemap_range_has_page - check if a page exists in range.
 460 * @mapping:           address space within which to check
 461 * @start_byte:        offset in bytes where the range starts
 462 * @end_byte:          offset in bytes where the range ends (inclusive)
 463 *
 464 * Find at least one page in the range supplied, usually used to check if
 465 * direct writing in this range will trigger a writeback.
 466 *
 467 * Return: %true if at least one page exists in the specified range,
 468 * %false otherwise.
 469 */
 470bool filemap_range_has_page(struct address_space *mapping,
 471			   loff_t start_byte, loff_t end_byte)
 472{
 473	struct folio *folio;
 474	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 475	pgoff_t max = end_byte >> PAGE_SHIFT;
 476
 477	if (end_byte < start_byte)
 478		return false;
 479
 480	rcu_read_lock();
 481	for (;;) {
 482		folio = xas_find(&xas, max);
 483		if (xas_retry(&xas, folio))
 484			continue;
 485		/* Shadow entries don't count */
 486		if (xa_is_value(folio))
 487			continue;
 488		/*
 489		 * We don't need to try to pin this page; we're about to
 490		 * release the RCU lock anyway.  It is enough to know that
 491		 * there was a page here recently.
 492		 */
 493		break;
 494	}
 495	rcu_read_unlock();
 496
 497	return folio != NULL;
 498}
 499EXPORT_SYMBOL(filemap_range_has_page);
 500
 501static void __filemap_fdatawait_range(struct address_space *mapping,
 502				     loff_t start_byte, loff_t end_byte)
 503{
 504	pgoff_t index = start_byte >> PAGE_SHIFT;
 505	pgoff_t end = end_byte >> PAGE_SHIFT;
 506	struct folio_batch fbatch;
 507	unsigned nr_folios;
 508
 509	folio_batch_init(&fbatch);
 510
 511	while (index <= end) {
 512		unsigned i;
 513
 514		nr_folios = filemap_get_folios_tag(mapping, &index, end,
 515				PAGECACHE_TAG_WRITEBACK, &fbatch);
 516
 517		if (!nr_folios)
 518			break;
 519
 520		for (i = 0; i < nr_folios; i++) {
 521			struct folio *folio = fbatch.folios[i];
 522
 523			folio_wait_writeback(folio);
 524		}
 525		folio_batch_release(&fbatch);
 526		cond_resched();
 527	}
 528}
 529
 530/**
 531 * filemap_fdatawait_range - wait for writeback to complete
 532 * @mapping:		address space structure to wait for
 533 * @start_byte:		offset in bytes where the range starts
 534 * @end_byte:		offset in bytes where the range ends (inclusive)
 535 *
 536 * Walk the list of under-writeback pages of the given address space
 537 * in the given range and wait for all of them.  Check error status of
 538 * the address space and return it.
 539 *
 540 * Since the error status of the address space is cleared by this function,
 541 * callers are responsible for checking the return value and handling and/or
 542 * reporting the error.
 543 *
 544 * Return: error status of the address space.
 545 */
 546int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 547			    loff_t end_byte)
 548{
 549	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 550	return filemap_check_errors(mapping);
 551}
 552EXPORT_SYMBOL(filemap_fdatawait_range);
 553
 554/**
 555 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 556 * @mapping:		address space structure to wait for
 557 * @start_byte:		offset in bytes where the range starts
 558 * @end_byte:		offset in bytes where the range ends (inclusive)
 559 *
 560 * Walk the list of under-writeback pages of the given address space in the
 561 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 562 * this function does not clear error status of the address space.
 563 *
 564 * Use this function if callers don't handle errors themselves.  Expected
 565 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 566 * fsfreeze(8)
 567 */
 568int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
 569		loff_t start_byte, loff_t end_byte)
 570{
 571	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 572	return filemap_check_and_keep_errors(mapping);
 573}
 574EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
 575
 576/**
 577 * file_fdatawait_range - wait for writeback to complete
 578 * @file:		file pointing to address space structure to wait for
 579 * @start_byte:		offset in bytes where the range starts
 580 * @end_byte:		offset in bytes where the range ends (inclusive)
 581 *
 582 * Walk the list of under-writeback pages of the address space that file
 583 * refers to, in the given range and wait for all of them.  Check error
 584 * status of the address space vs. the file->f_wb_err cursor and return it.
 585 *
 586 * Since the error status of the file is advanced by this function,
 587 * callers are responsible for checking the return value and handling and/or
 588 * reporting the error.
 589 *
 590 * Return: error status of the address space vs. the file->f_wb_err cursor.
 591 */
 592int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
 593{
 594	struct address_space *mapping = file->f_mapping;
 595
 596	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 597	return file_check_and_advance_wb_err(file);
 598}
 599EXPORT_SYMBOL(file_fdatawait_range);
 600
 601/**
 602 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 603 * @mapping: address space structure to wait for
 604 *
 605 * Walk the list of under-writeback pages of the given address space
 606 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 607 * does not clear error status of the address space.
 608 *
 609 * Use this function if callers don't handle errors themselves.  Expected
 610 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 611 * fsfreeze(8)
 612 *
 613 * Return: error status of the address space.
 614 */
 615int filemap_fdatawait_keep_errors(struct address_space *mapping)
 616{
 617	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
 618	return filemap_check_and_keep_errors(mapping);
 619}
 620EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
 621
 622/* Returns true if writeback might be needed or already in progress. */
 623static bool mapping_needs_writeback(struct address_space *mapping)
 624{
 625	return mapping->nrpages;
 626}
 627
 628bool filemap_range_has_writeback(struct address_space *mapping,
 629				 loff_t start_byte, loff_t end_byte)
 630{
 631	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 632	pgoff_t max = end_byte >> PAGE_SHIFT;
 633	struct folio *folio;
 634
 635	if (end_byte < start_byte)
 636		return false;
 637
 638	rcu_read_lock();
 639	xas_for_each(&xas, folio, max) {
 640		if (xas_retry(&xas, folio))
 641			continue;
 642		if (xa_is_value(folio))
 643			continue;
 644		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
 645				folio_test_writeback(folio))
 646			break;
 647	}
 648	rcu_read_unlock();
 649	return folio != NULL;
 650}
 651EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
 652
 653/**
 654 * filemap_write_and_wait_range - write out & wait on a file range
 655 * @mapping:	the address_space for the pages
 656 * @lstart:	offset in bytes where the range starts
 657 * @lend:	offset in bytes where the range ends (inclusive)
 658 *
 659 * Write out and wait upon file offsets lstart->lend, inclusive.
 660 *
 661 * Note that @lend is inclusive (describes the last byte to be written) so
 662 * that this function can be used to write to the very end-of-file (end = -1).
 663 *
 664 * Return: error status of the address space.
 665 */
 666int filemap_write_and_wait_range(struct address_space *mapping,
 667				 loff_t lstart, loff_t lend)
 668{
 669	int err = 0, err2;
 670
 671	if (lend < lstart)
 672		return 0;
 673
 674	if (mapping_needs_writeback(mapping)) {
 675		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 676						 WB_SYNC_ALL);
 677		/*
 678		 * Even if the above returned error, the pages may be
 679		 * written partially (e.g. -ENOSPC), so we wait for it.
  680		 * But -EIO is a special case; it may indicate that the worst
  681		 * thing (e.g. a bug) happened, so we avoid waiting for it.
 682		 */
 683		if (err != -EIO)
 684			__filemap_fdatawait_range(mapping, lstart, lend);
 685	}
 686	err2 = filemap_check_errors(mapping);
 687	if (!err)
 688		err = err2;
 689	return err;
 690}
 691EXPORT_SYMBOL(filemap_write_and_wait_range);
 692
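/*
 * Illustrative sketch (not part of this file): a caller about to drop or
 * replace cached data for a byte range, e.g. before punching a hole, first
 * flushes that range (both offsets inclusive):
 *
 *	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 *	if (err)
 *		return err;
 *	truncate_pagecache_range(inode, start, end);
 */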
 693void __filemap_set_wb_err(struct address_space *mapping, int err)
 694{
 695	errseq_t eseq = errseq_set(&mapping->wb_err, err);
 696
 697	trace_filemap_set_wb_err(mapping, eseq);
 698}
 699EXPORT_SYMBOL(__filemap_set_wb_err);
 700
 701/**
  702 * file_check_and_advance_wb_err - report the wb error (if any) seen previously
  703 * 				   and advance wb_err to the current one
 704 * @file: struct file on which the error is being reported
 705 *
 706 * When userland calls fsync (or something like nfsd does the equivalent), we
 707 * want to report any writeback errors that occurred since the last fsync (or
 708 * since the file was opened if there haven't been any).
 709 *
 710 * Grab the wb_err from the mapping. If it matches what we have in the file,
 711 * then just quickly return 0. The file is all caught up.
 712 *
 713 * If it doesn't match, then take the mapping value, set the "seen" flag in
 714 * it and try to swap it into place. If it works, or another task beat us
 715 * to it with the new value, then update the f_wb_err and return the error
 716 * portion. The error at this point must be reported via proper channels
 717 * (a'la fsync, or NFS COMMIT operation, etc.).
 718 *
 719 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 720 * value is protected by the f_lock since we must ensure that it reflects
 721 * the latest value swapped in for this file descriptor.
 722 *
 723 * Return: %0 on success, negative error code otherwise.
 724 */
 725int file_check_and_advance_wb_err(struct file *file)
 726{
 727	int err = 0;
 728	errseq_t old = READ_ONCE(file->f_wb_err);
 729	struct address_space *mapping = file->f_mapping;
 730
 731	/* Locklessly handle the common case where nothing has changed */
 732	if (errseq_check(&mapping->wb_err, old)) {
 733		/* Something changed, must use slow path */
 734		spin_lock(&file->f_lock);
 735		old = file->f_wb_err;
 736		err = errseq_check_and_advance(&mapping->wb_err,
 737						&file->f_wb_err);
 738		trace_file_check_and_advance_wb_err(file, old);
 739		spin_unlock(&file->f_lock);
 740	}
 741
 742	/*
  743	 * We're mostly using this function as a drop-in replacement for
 744	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
 745	 * that the legacy code would have had on these flags.
 746	 */
 747	clear_bit(AS_EIO, &mapping->flags);
 748	clear_bit(AS_ENOSPC, &mapping->flags);
 749	return err;
 750}
 751EXPORT_SYMBOL(file_check_and_advance_wb_err);
 752
 753/**
 754 * file_write_and_wait_range - write out & wait on a file range
 755 * @file:	file pointing to address_space with pages
 756 * @lstart:	offset in bytes where the range starts
 757 * @lend:	offset in bytes where the range ends (inclusive)
 758 *
 759 * Write out and wait upon file offsets lstart->lend, inclusive.
 760 *
 761 * Note that @lend is inclusive (describes the last byte to be written) so
 762 * that this function can be used to write to the very end-of-file (end = -1).
 763 *
 764 * After writing out and waiting on the data, we check and advance the
 765 * f_wb_err cursor to the latest value, and return any errors detected there.
 766 *
 767 * Return: %0 on success, negative error code otherwise.
 768 */
 769int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
 770{
 771	int err = 0, err2;
 772	struct address_space *mapping = file->f_mapping;
 773
 774	if (lend < lstart)
 775		return 0;
 776
 777	if (mapping_needs_writeback(mapping)) {
 778		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 779						 WB_SYNC_ALL);
 780		/* See comment of filemap_write_and_wait() */
 781		if (err != -EIO)
 782			__filemap_fdatawait_range(mapping, lstart, lend);
 783	}
 784	err2 = file_check_and_advance_wb_err(file);
 785	if (!err)
 786		err = err2;
 787	return err;
 788}
 789EXPORT_SYMBOL(file_write_and_wait_range);
 790
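/*
 * Illustrative sketch (hypothetical examplefs_fsync(), not part of this
 * file): the usual ->fsync pattern writes and waits on the data range
 * first, then syncs metadata under the inode lock:
 *
 *	static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
 *				   int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err;
 *
 *		err = file_write_and_wait_range(file, start, end);
 *		if (err)
 *			return err;
 *
 *		inode_lock(inode);
 *		err = sync_inode_metadata(inode, 1);
 *		inode_unlock(inode);
 *		return err;
 *	}
 */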
 791/**
 792 * replace_page_cache_folio - replace a pagecache folio with a new one
 793 * @old:	folio to be replaced
 794 * @new:	folio to replace with
 795 *
 796 * This function replaces a folio in the pagecache with a new one.  On
 797 * success it acquires the pagecache reference for the new folio and
 798 * drops it for the old folio.  Both the old and new folios must be
 799 * locked.  This function does not add the new folio to the LRU, the
 800 * caller must do that.
 801 *
 802 * The remove + add is atomic.  This function cannot fail.
 803 */
 804void replace_page_cache_folio(struct folio *old, struct folio *new)
 805{
 806	struct address_space *mapping = old->mapping;
 807	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
 808	pgoff_t offset = old->index;
 809	XA_STATE(xas, &mapping->i_pages, offset);
 810
 811	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
 812	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
 813	VM_BUG_ON_FOLIO(new->mapping, new);
 814
 815	folio_get(new);
 816	new->mapping = mapping;
 817	new->index = offset;
 818
 819	mem_cgroup_replace_folio(old, new);
 820
 821	xas_lock_irq(&xas);
 822	xas_store(&xas, new);
 823
 824	old->mapping = NULL;
 825	/* hugetlb pages do not participate in page cache accounting. */
 826	if (!folio_test_hugetlb(old))
 827		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
 828	if (!folio_test_hugetlb(new))
 829		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
 830	if (folio_test_swapbacked(old))
 831		__lruvec_stat_sub_folio(old, NR_SHMEM);
 832	if (folio_test_swapbacked(new))
 833		__lruvec_stat_add_folio(new, NR_SHMEM);
 834	xas_unlock_irq(&xas);
 835	if (free_folio)
 836		free_folio(old);
 837	folio_put(old);
 838}
 839EXPORT_SYMBOL_GPL(replace_page_cache_folio);
 840
 841noinline int __filemap_add_folio(struct address_space *mapping,
 842		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
 843{
 844	XA_STATE(xas, &mapping->i_pages, index);
 845	void *alloced_shadow = NULL;
 846	int alloced_order = 0;
 847	bool huge;
 848	long nr;
 849
 850	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 851	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
 852	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
 853			folio);
 854	mapping_set_update(&xas, mapping);
 855
 856	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 857	xas_set_order(&xas, index, folio_order(folio));
 858	huge = folio_test_hugetlb(folio);
 859	nr = folio_nr_pages(folio);
 860
 861	gfp &= GFP_RECLAIM_MASK;
 862	folio_ref_add(folio, nr);
 863	folio->mapping = mapping;
 864	folio->index = xas.xa_index;
 865
 866	for (;;) {
 867		int order = -1, split_order = 0;
 868		void *entry, *old = NULL;
 869
 870		xas_lock_irq(&xas);
 871		xas_for_each_conflict(&xas, entry) {
 872			old = entry;
 873			if (!xa_is_value(entry)) {
 874				xas_set_err(&xas, -EEXIST);
 875				goto unlock;
 876			}
 877			/*
 878			 * If a larger entry exists,
 879			 * it will be the first and only entry iterated.
 880			 */
 881			if (order == -1)
 882				order = xas_get_order(&xas);
 883		}
 884
 885		/* entry may have changed before we re-acquire the lock */
 886		if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
 887			xas_destroy(&xas);
 888			alloced_order = 0;
 889		}
 890
 891		if (old) {
 892			if (order > 0 && order > folio_order(folio)) {
 893				/* How to handle large swap entries? */
 894				BUG_ON(shmem_mapping(mapping));
 895				if (!alloced_order) {
 896					split_order = order;
 897					goto unlock;
 898				}
 899				xas_split(&xas, old, order);
 900				xas_reset(&xas);
 901			}
 902			if (shadowp)
 903				*shadowp = old;
 904		}
 905
 906		xas_store(&xas, folio);
 907		if (xas_error(&xas))
 908			goto unlock;
 909
 910		mapping->nrpages += nr;
 911
 912		/* hugetlb pages do not participate in page cache accounting */
 913		if (!huge) {
 914			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
 915			if (folio_test_pmd_mappable(folio))
 916				__lruvec_stat_mod_folio(folio,
 917						NR_FILE_THPS, nr);
 918		}
 919
 920unlock:
 921		xas_unlock_irq(&xas);
 922
 923		/* split needed, alloc here and retry. */
 924		if (split_order) {
 925			xas_split_alloc(&xas, old, split_order, gfp);
 926			if (xas_error(&xas))
 927				goto error;
 928			alloced_shadow = old;
 929			alloced_order = split_order;
 930			xas_reset(&xas);
 931			continue;
 932		}
 933
 934		if (!xas_nomem(&xas, gfp))
 935			break;
 936	}
 937
 938	if (xas_error(&xas))
 939		goto error;
 940
 941	trace_mm_filemap_add_to_page_cache(folio);
 942	return 0;
 943error:
 944	folio->mapping = NULL;
 945	/* Leave page->index set: truncation relies upon it */
 946	folio_put_refs(folio, nr);
 947	return xas_error(&xas);
 948}
 949ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 950
 951int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 952				pgoff_t index, gfp_t gfp)
 953{
 954	void *shadow = NULL;
 955	int ret;
 956
 957	ret = mem_cgroup_charge(folio, NULL, gfp);
 958	if (ret)
 959		return ret;
 960
 961	__folio_set_locked(folio);
 962	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
 963	if (unlikely(ret)) {
 964		mem_cgroup_uncharge(folio);
 965		__folio_clear_locked(folio);
 966	} else {
 967		/*
 968		 * The folio might have been evicted from cache only
 969		 * recently, in which case it should be activated like
 970		 * any other repeatedly accessed folio.
 971		 * The exception is folios getting rewritten; evicting other
 972		 * data from the working set, only to cache data that will
 973		 * get overwritten with something else, is a waste of memory.
 974		 */
 975		WARN_ON_ONCE(folio_test_active(folio));
 976		if (!(gfp & __GFP_WRITE) && shadow)
 977			workingset_refault(folio, shadow);
 978		folio_add_lru(folio);
 979	}
 980	return ret;
 981}
 982EXPORT_SYMBOL_GPL(filemap_add_folio);
 983
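/*
 * Illustrative sketch (not part of this file): the common "allocate, insert,
 * retry on collision" pattern used by page-cache readers such as
 * do_read_cache_folio():
 *
 *	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	err = filemap_add_folio(mapping, folio, index, mapping_gfp_mask(mapping));
 *	if (err) {
 *		folio_put(folio);
 *		if (err == -EEXIST)
 *			goto lookup_again;	(someone else added the folio)
 *		return err;
 *	}
 *	(on success the folio is locked, in the cache and on the LRU)
 */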
 984#ifdef CONFIG_NUMA
 985struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
 986{
 987	int n;
 988	struct folio *folio;
 989
 990	if (cpuset_do_page_mem_spread()) {
 991		unsigned int cpuset_mems_cookie;
 992		do {
 993			cpuset_mems_cookie = read_mems_allowed_begin();
 994			n = cpuset_mem_spread_node();
 995			folio = __folio_alloc_node_noprof(gfp, order, n);
 996		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
 997
 998		return folio;
 999	}
1000	return folio_alloc_noprof(gfp, order);
1001}
1002EXPORT_SYMBOL(filemap_alloc_folio_noprof);
1003#endif
1004
1005/*
1006 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
1007 *
1008 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
1009 *
1010 * @mapping1: the first mapping to lock
1011 * @mapping2: the second mapping to lock
1012 */
1013void filemap_invalidate_lock_two(struct address_space *mapping1,
1014				 struct address_space *mapping2)
1015{
1016	if (mapping1 > mapping2)
1017		swap(mapping1, mapping2);
1018	if (mapping1)
1019		down_write(&mapping1->invalidate_lock);
1020	if (mapping2 && mapping1 != mapping2)
1021		down_write_nested(&mapping2->invalidate_lock, 1);
1022}
1023EXPORT_SYMBOL(filemap_invalidate_lock_two);
1024
1025/*
1026 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1027 *
1028 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1029 *
1030 * @mapping1: the first mapping to unlock
1031 * @mapping2: the second mapping to unlock
1032 */
1033void filemap_invalidate_unlock_two(struct address_space *mapping1,
1034				   struct address_space *mapping2)
1035{
1036	if (mapping1)
1037		up_write(&mapping1->invalidate_lock);
1038	if (mapping2 && mapping1 != mapping2)
1039		up_write(&mapping2->invalidate_lock);
1040}
1041EXPORT_SYMBOL(filemap_invalidate_unlock_two);
1042
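/*
 * Illustrative sketch (not part of this file): operations that invalidate
 * page cache on two files at once (for example a cross-file range remap)
 * take both locks through the helper above so the ordering stays consistent:
 *
 *	filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);
 *	... invalidate both mappings, update extents ...
 *	filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
 */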
1043/*
1044 * In order to wait for pages to become available there must be
1045 * waitqueues associated with pages. By using a hash table of
1046 * waitqueues where the bucket discipline is to maintain all
1047 * waiters on the same queue and wake all when any of the pages
1048 * become available, and for the woken contexts to check to be
1049 * sure the appropriate page became available, this saves space
1050 * at a cost of "thundering herd" phenomena during rare hash
1051 * collisions.
1052 */
1053#define PAGE_WAIT_TABLE_BITS 8
1054#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1055static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1056
1057static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1058{
1059	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1060}
1061
1062void __init pagecache_init(void)
1063{
1064	int i;
1065
1066	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1067		init_waitqueue_head(&folio_wait_table[i]);
1068
1069	page_writeback_init();
1070}
1071
1072/*
1073 * The page wait code treats the "wait->flags" somewhat unusually, because
1074 * we have multiple different kinds of waits, not just the usual "exclusive"
1075 * one.
1076 *
1077 * We have:
1078 *
1079 *  (a) no special bits set:
1080 *
1081 *	We're just waiting for the bit to be released, and when a waker
1082 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1083 *	and remove it from the wait queue.
1084 *
1085 *	Simple and straightforward.
1086 *
1087 *  (b) WQ_FLAG_EXCLUSIVE:
1088 *
1089 *	The waiter is waiting to get the lock, and only one waiter should
1090 *	be woken up to avoid any thundering herd behavior. We'll set the
1091 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1092 *
1093 *	This is the traditional exclusive wait.
1094 *
1095 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1096 *
1097 *	The waiter is waiting to get the bit, and additionally wants the
1098 *	lock to be transferred to it for fair lock behavior. If the lock
1099 *	cannot be taken, we stop walking the wait queue without waking
1100 *	the waiter.
1101 *
1102 *	This is the "fair lock handoff" case, and in addition to setting
1103 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1104 *	that it now has the lock.
1105 */
1106static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1107{
1108	unsigned int flags;
1109	struct wait_page_key *key = arg;
1110	struct wait_page_queue *wait_page
1111		= container_of(wait, struct wait_page_queue, wait);
1112
1113	if (!wake_page_match(wait_page, key))
1114		return 0;
1115
1116	/*
1117	 * If it's a lock handoff wait, we get the bit for it, and
1118	 * stop walking (and do not wake it up) if we can't.
1119	 */
1120	flags = wait->flags;
1121	if (flags & WQ_FLAG_EXCLUSIVE) {
1122		if (test_bit(key->bit_nr, &key->folio->flags))
1123			return -1;
1124		if (flags & WQ_FLAG_CUSTOM) {
1125			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1126				return -1;
1127			flags |= WQ_FLAG_DONE;
1128		}
1129	}
1130
1131	/*
1132	 * We are holding the wait-queue lock, but the waiter that
1133	 * is waiting for this will be checking the flags without
1134	 * any locking.
1135	 *
1136	 * So update the flags atomically, and wake up the waiter
1137	 * afterwards to avoid any races. This store-release pairs
1138	 * with the load-acquire in folio_wait_bit_common().
1139	 */
1140	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1141	wake_up_state(wait->private, mode);
1142
1143	/*
1144	 * Ok, we have successfully done what we're waiting for,
1145	 * and we can unconditionally remove the wait entry.
1146	 *
1147	 * Note that this pairs with the "finish_wait()" in the
1148	 * waiter, and has to be the absolute last thing we do.
1149	 * After this list_del_init(&wait->entry) the wait entry
1150	 * might be de-allocated and the process might even have
1151	 * exited.
1152	 */
1153	list_del_init_careful(&wait->entry);
1154	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1155}
1156
1157static void folio_wake_bit(struct folio *folio, int bit_nr)
1158{
1159	wait_queue_head_t *q = folio_waitqueue(folio);
1160	struct wait_page_key key;
1161	unsigned long flags;
1162
1163	key.folio = folio;
1164	key.bit_nr = bit_nr;
1165	key.page_match = 0;
1166
1167	spin_lock_irqsave(&q->lock, flags);
1168	__wake_up_locked_key(q, TASK_NORMAL, &key);
1169
1170	/*
1171	 * It's possible to miss clearing waiters here, when we woke our page
1172	 * waiters, but the hashed waitqueue has waiters for other pages on it.
1173	 * That's okay, it's a rare case. The next waker will clear it.
1174	 *
1175	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1176	 * other), the flag may be cleared in the course of freeing the page;
1177	 * but that is not required for correctness.
1178	 */
1179	if (!waitqueue_active(q) || !key.page_match)
1180		folio_clear_waiters(folio);
1181
1182	spin_unlock_irqrestore(&q->lock, flags);
1183}
1184
1185/*
1186 * A choice of three behaviors for folio_wait_bit_common():
1187 */
1188enum behavior {
1189	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
1190			 * __folio_lock() waiting on then setting PG_locked.
1191			 */
1192	SHARED,		/* Hold ref to page and check the bit when woken, like
1193			 * folio_wait_writeback() waiting on PG_writeback.
1194			 */
1195	DROP,		/* Drop ref to page before wait, no check when woken,
1196			 * like folio_put_wait_locked() on PG_locked.
1197			 */
1198};
1199
1200/*
1201 * Attempt to check (or get) the folio flag, and mark us done
1202 * if successful.
1203 */
1204static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1205					struct wait_queue_entry *wait)
1206{
1207	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1208		if (test_and_set_bit(bit_nr, &folio->flags))
1209			return false;
1210	} else if (test_bit(bit_nr, &folio->flags))
1211		return false;
1212
1213	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1214	return true;
1215}
1216
1217/* How many times do we accept lock stealing from under a waiter? */
1218int sysctl_page_lock_unfairness = 5;
1219
1220static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1221		int state, enum behavior behavior)
1222{
1223	wait_queue_head_t *q = folio_waitqueue(folio);
1224	int unfairness = sysctl_page_lock_unfairness;
1225	struct wait_page_queue wait_page;
1226	wait_queue_entry_t *wait = &wait_page.wait;
1227	bool thrashing = false;
1228	unsigned long pflags;
1229	bool in_thrashing;
1230
1231	if (bit_nr == PG_locked &&
1232	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1233		delayacct_thrashing_start(&in_thrashing);
1234		psi_memstall_enter(&pflags);
1235		thrashing = true;
1236	}
1237
1238	init_wait(wait);
1239	wait->func = wake_page_function;
1240	wait_page.folio = folio;
1241	wait_page.bit_nr = bit_nr;
1242
1243repeat:
1244	wait->flags = 0;
1245	if (behavior == EXCLUSIVE) {
1246		wait->flags = WQ_FLAG_EXCLUSIVE;
1247		if (--unfairness < 0)
1248			wait->flags |= WQ_FLAG_CUSTOM;
1249	}
1250
1251	/*
1252	 * Do one last check whether we can get the
1253	 * page bit synchronously.
1254	 *
1255	 * Do the folio_set_waiters() marking before that
1256	 * to let any waker we _just_ missed know they
1257	 * need to wake us up (otherwise they'll never
1258	 * even go to the slow case that looks at the
1259	 * page queue), and add ourselves to the wait
1260	 * queue if we need to sleep.
1261	 *
1262	 * This part needs to be done under the queue
1263	 * lock to avoid races.
1264	 */
1265	spin_lock_irq(&q->lock);
1266	folio_set_waiters(folio);
1267	if (!folio_trylock_flag(folio, bit_nr, wait))
1268		__add_wait_queue_entry_tail(q, wait);
1269	spin_unlock_irq(&q->lock);
1270
1271	/*
1272	 * From now on, all the logic will be based on
1273	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1274	 * see whether the page bit testing has already
1275	 * been done by the wake function.
1276	 *
1277	 * We can drop our reference to the folio.
1278	 */
1279	if (behavior == DROP)
1280		folio_put(folio);
1281
1282	/*
1283	 * Note that until the "finish_wait()", or until
1284	 * we see the WQ_FLAG_WOKEN flag, we need to
1285	 * be very careful with the 'wait->flags', because
1286	 * we may race with a waker that sets them.
1287	 */
1288	for (;;) {
1289		unsigned int flags;
1290
1291		set_current_state(state);
1292
1293		/* Loop until we've been woken or interrupted */
1294		flags = smp_load_acquire(&wait->flags);
1295		if (!(flags & WQ_FLAG_WOKEN)) {
1296			if (signal_pending_state(state, current))
1297				break;
1298
1299			io_schedule();
1300			continue;
1301		}
1302
1303		/* If we were non-exclusive, we're done */
1304		if (behavior != EXCLUSIVE)
1305			break;
1306
1307		/* If the waker got the lock for us, we're done */
1308		if (flags & WQ_FLAG_DONE)
1309			break;
1310
1311		/*
1312		 * Otherwise, if we're getting the lock, we need to
1313		 * try to get it ourselves.
1314		 *
1315		 * And if that fails, we'll have to retry this all.
1316		 */
1317		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1318			goto repeat;
1319
1320		wait->flags |= WQ_FLAG_DONE;
1321		break;
1322	}
1323
1324	/*
1325	 * If a signal happened, this 'finish_wait()' may remove the last
1326	 * waiter from the wait-queues, but the folio waiters bit will remain
1327	 * set. That's ok. The next wakeup will take care of it, and trying
1328	 * to do it here would be difficult and prone to races.
1329	 */
1330	finish_wait(q, wait);
1331
1332	if (thrashing) {
1333		delayacct_thrashing_end(&in_thrashing);
1334		psi_memstall_leave(&pflags);
1335	}
1336
1337	/*
1338	 * NOTE! The wait->flags weren't stable until we've done the
1339	 * 'finish_wait()', and we could have exited the loop above due
1340	 * to a signal, and had a wakeup event happen after the signal
1341	 * test but before the 'finish_wait()'.
1342	 *
1343	 * So only after the finish_wait() can we reliably determine
1344	 * if we got woken up or not, so we can now figure out the final
1345	 * return value based on that state without races.
1346	 *
1347	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1348	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1349	 */
1350	if (behavior == EXCLUSIVE)
1351		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1352
1353	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1354}
1355
1356#ifdef CONFIG_MIGRATION
1357/**
1358 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1359 * @entry: migration swap entry.
1360 * @ptl: already locked ptl. This function will drop the lock.
1361 *
1362 * Wait for a migration entry referencing the given page to be removed. This is
1363 * equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except
1364 * this can be called without taking a reference on the page. Instead this
1365 * should be called while holding the ptl for the migration entry referencing
1366 * the page.
1367 *
1368 * Returns after unlocking the ptl.
1369 *
1370 * This follows the same logic as folio_wait_bit_common() so see the comments
1371 * there.
1372 */
1373void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
1374	__releases(ptl)
1375{
1376	struct wait_page_queue wait_page;
1377	wait_queue_entry_t *wait = &wait_page.wait;
1378	bool thrashing = false;
1379	unsigned long pflags;
1380	bool in_thrashing;
1381	wait_queue_head_t *q;
1382	struct folio *folio = pfn_swap_entry_folio(entry);
1383
1384	q = folio_waitqueue(folio);
1385	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1386		delayacct_thrashing_start(&in_thrashing);
1387		psi_memstall_enter(&pflags);
1388		thrashing = true;
1389	}
1390
1391	init_wait(wait);
1392	wait->func = wake_page_function;
1393	wait_page.folio = folio;
1394	wait_page.bit_nr = PG_locked;
1395	wait->flags = 0;
1396
1397	spin_lock_irq(&q->lock);
1398	folio_set_waiters(folio);
1399	if (!folio_trylock_flag(folio, PG_locked, wait))
1400		__add_wait_queue_entry_tail(q, wait);
1401	spin_unlock_irq(&q->lock);
1402
1403	/*
1404	 * If a migration entry exists for the page the migration path must hold
1405	 * a valid reference to the page, and it must take the ptl to remove the
1406	 * migration entry. So the page is valid until the ptl is dropped.
1407	 */
1408	spin_unlock(ptl);
1409
1410	for (;;) {
1411		unsigned int flags;
1412
1413		set_current_state(TASK_UNINTERRUPTIBLE);
1414
1415		/* Loop until we've been woken or interrupted */
1416		flags = smp_load_acquire(&wait->flags);
1417		if (!(flags & WQ_FLAG_WOKEN)) {
1418			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1419				break;
1420
1421			io_schedule();
1422			continue;
1423		}
1424		break;
1425	}
1426
1427	finish_wait(q, wait);
1428
1429	if (thrashing) {
1430		delayacct_thrashing_end(&in_thrashing);
1431		psi_memstall_leave(&pflags);
1432	}
1433}
1434#endif
1435
1436void folio_wait_bit(struct folio *folio, int bit_nr)
1437{
1438	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1439}
1440EXPORT_SYMBOL(folio_wait_bit);
1441
1442int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1443{
1444	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1445}
1446EXPORT_SYMBOL(folio_wait_bit_killable);
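
/*
 * Illustrative sketch, not part of this file: callers that hold a folio
 * reference and want writeback to finish normally use the
 * folio_wait_writeback() wrappers; open-coded on top of folio_wait_bit()
 * the same loop might look like the hypothetical helper below
 * (example_wait_writeback() is an assumed name).
 */
static inline int example_wait_writeback(struct folio *folio, bool killable)
{
	while (folio_test_writeback(folio)) {
		if (killable) {
			int err = folio_wait_bit_killable(folio, PG_writeback);

			if (err)
				return err;	/* -EINTR on a fatal signal */
		} else {
			folio_wait_bit(folio, PG_writeback);
		}
	}
	return 0;
}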
1447
1448/**
1449 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1450 * @folio: The folio to wait for.
1451 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1452 *
1453 * The caller should hold a reference on @folio.  They expect the folio to
1454 * become unlocked relatively soon, but do not wish to hold up migration
1455 * (for example) by holding the reference while waiting for the folio to
1456 * come unlocked.  After this function returns, the caller should not
1457 * dereference @folio.
1458 *
1459 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1460 */
1461static int folio_put_wait_locked(struct folio *folio, int state)
1462{
1463	return folio_wait_bit_common(folio, PG_locked, state, DROP);
1464}
1465
1466/**
1467 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1468 * @folio: Folio defining the wait queue of interest
1469 * @waiter: Waiter to add to the queue
1470 *
1471 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1472 */
1473void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1474{
1475	wait_queue_head_t *q = folio_waitqueue(folio);
1476	unsigned long flags;
1477
1478	spin_lock_irqsave(&q->lock, flags);
1479	__add_wait_queue_entry_tail(q, waiter);
1480	folio_set_waiters(folio);
1481	spin_unlock_irqrestore(&q->lock, flags);
1482}
1483EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1484
1485/**
1486 * folio_unlock - Unlock a locked folio.
1487 * @folio: The folio.
1488 *
1489 * Unlocks the folio and wakes up any thread sleeping on the page lock.
1490 *
1491 * Context: May be called from interrupt or process context.  May not be
1492 * called from NMI context.
1493 */
1494void folio_unlock(struct folio *folio)
1495{
1496	/* Bit 7 allows x86 to check the byte's sign bit */
1497	BUILD_BUG_ON(PG_waiters != 7);
1498	BUILD_BUG_ON(PG_locked > 7);
1499	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1500	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
1501		folio_wake_bit(folio, PG_locked);
1502}
1503EXPORT_SYMBOL(folio_unlock);
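
/*
 * Illustrative sketch, not part of this file: the common trylock/unlock
 * pairing around work that must not sleep waiting for the folio lock.
 * The helper name example_try_touch() is hypothetical.
 */
static inline bool example_try_touch(struct folio *folio)
{
	if (!folio_trylock(folio))
		return false;
	/* The lock pins folio->mapping against truncation. */
	if (folio->mapping)
		folio_mark_accessed(folio);
	folio_unlock(folio);
	return true;
}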
1504
1505/**
1506 * folio_end_read - End read on a folio.
1507 * @folio: The folio.
1508 * @success: True if all reads completed successfully.
1509 *
1510 * When all reads against a folio have completed, filesystems should
1511 * call this function to let the pagecache know that no more reads
1512 * are outstanding.  This will unlock the folio and wake up any thread
1513 * sleeping on the lock.  The folio will also be marked uptodate if all
1514 * reads succeeded.
1515 *
1516 * Context: May be called from interrupt or process context.  May not be
1517 * called from NMI context.
1518 */
1519void folio_end_read(struct folio *folio, bool success)
1520{
1521	unsigned long mask = 1 << PG_locked;
1522
1523	/* Must be in bottom byte for x86 to work */
1524	BUILD_BUG_ON(PG_uptodate > 7);
1525	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1526	VM_BUG_ON_FOLIO(success && folio_test_uptodate(folio), folio);
1527
1528	if (likely(success))
1529		mask |= 1 << PG_uptodate;
1530	if (folio_xor_flags_has_waiters(folio, mask))
1531		folio_wake_bit(folio, PG_locked);
1532}
1533EXPORT_SYMBOL(folio_end_read);
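
/*
 * Illustrative sketch, not part of this file: how a filesystem's read
 * completion might report the outcome for a folio it has just filled.
 * The name example_read_done() and the 0-or-negative error convention
 * are assumptions for illustration.
 */
static inline void example_read_done(struct folio *folio, int error)
{
	/* Marks the folio uptodate on success, then unlocks and wakes waiters. */
	folio_end_read(folio, error == 0);
}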
1534
1535/**
1536 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1537 * @folio: The folio.
1538 *
1539 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1540 * it.  The folio reference held for PG_private_2 being set is released.
1541 *
1542 * This is, for example, used when a netfs folio is being written to a local
1543 * disk cache, thereby allowing writes to the cache for the same folio to be
1544 * serialised.
1545 */
1546void folio_end_private_2(struct folio *folio)
1547{
1548	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1549	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1550	folio_wake_bit(folio, PG_private_2);
1551	folio_put(folio);
1552}
1553EXPORT_SYMBOL(folio_end_private_2);
1554
1555/**
1556 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1557 * @folio: The folio to wait on.
1558 *
1559 * Wait for PG_private_2 to be cleared on a folio.
1560 */
1561void folio_wait_private_2(struct folio *folio)
1562{
1563	while (folio_test_private_2(folio))
1564		folio_wait_bit(folio, PG_private_2);
1565}
1566EXPORT_SYMBOL(folio_wait_private_2);
1567
1568/**
1569 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1570 * @folio: The folio to wait on.
1571 *
1572 * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
1573 * received by the calling task.
1574 *
1575 * Return:
1576 * - 0 if successful.
1577 * - -EINTR if a fatal signal was encountered.
1578 */
1579int folio_wait_private_2_killable(struct folio *folio)
1580{
1581	int ret = 0;
1582
1583	while (folio_test_private_2(folio)) {
1584		ret = folio_wait_bit_killable(folio, PG_private_2);
1585		if (ret < 0)
1586			break;
1587	}
1588
1589	return ret;
1590}
1591EXPORT_SYMBOL(folio_wait_private_2_killable);
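
/*
 * Illustrative sketch, not part of this file: a hypothetical path that must
 * not reuse a folio while an earlier copy to a local cache (tracked by
 * PG_private_2) is still in flight, but wants to honour fatal signals.
 */
static inline int example_wait_for_cache_copy(struct folio *folio)
{
	/* 0 once PG_private_2 is clear, -EINTR if a fatal signal arrived. */
	return folio_wait_private_2_killable(folio);
}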
1592
1593/**
1594 * folio_end_writeback - End writeback against a folio.
1595 * @folio: The folio.
1596 *
1597 * The folio must actually be under writeback.
1598 *
1599 * Context: May be called from process or interrupt context.
1600 */
1601void folio_end_writeback(struct folio *folio)
1602{
1603	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
1604
1605	/*
1606	 * folio_test_clear_reclaim() could be used here but it is an
1607	 * atomic operation and overkill in this particular case. The
1608	 * occasional failure to move a folio marked for immediate reclaim to
1609	 * the tail of the LRU is too small a cost to justify paying an atomic
1610	 * operation penalty at the end of every folio writeback.
1611	 */
1612	if (folio_test_reclaim(folio)) {
1613		folio_clear_reclaim(folio);
1614		folio_rotate_reclaimable(folio);
1615	}
1616
1617	/*
1618	 * Writeback does not hold a folio reference of its own, relying
1619	 * on truncation to wait for the clearing of PG_writeback.
1620	 * But here we must make sure that the folio is not freed and
1621	 * reused before the folio_wake_bit().
1622	 */
1623	folio_get(folio);
1624	if (__folio_end_writeback(folio))
1625		folio_wake_bit(folio, PG_writeback);
1626	acct_reclaim_writeback(folio);
1627	folio_put(folio);
1628}
1629EXPORT_SYMBOL(folio_end_writeback);
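
/*
 * Illustrative sketch, not part of this file: a write-completion handler as
 * a hypothetical filesystem might write it, recording any error on the
 * mapping before ending writeback (after folio_end_writeback() the folio may
 * be reclaimed, so touch folio->mapping first).  example_write_done() is an
 * assumed name.
 */
static inline void example_write_done(struct folio *folio, int error)
{
	if (unlikely(error))
		mapping_set_error(folio->mapping, error);
	folio_end_writeback(folio);
}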
1630
1631/**
1632 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1633 * @folio: The folio to lock
1634 */
1635void __folio_lock(struct folio *folio)
1636{
1637	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1638				EXCLUSIVE);
1639}
1640EXPORT_SYMBOL(__folio_lock);
1641
1642int __folio_lock_killable(struct folio *folio)
1643{
1644	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1645					EXCLUSIVE);
1646}
1647EXPORT_SYMBOL_GPL(__folio_lock_killable);
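
/*
 * Illustrative sketch, not part of this file: callers normally reach these
 * slow paths through the folio_lock()/folio_lock_killable() wrappers, which
 * only sleep when the trylock fails.  A hypothetical killable caller:
 */
static inline int example_with_locked_folio(struct folio *folio)
{
	int err = folio_lock_killable(folio);

	if (err)
		return err;		/* -EINTR on a fatal signal */
	/* ... operate on the locked folio here ... */
	folio_unlock(folio);
	return 0;
}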
1648
1649static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1650{
1651	struct wait_queue_head *q = folio_waitqueue(folio);
1652	int ret;
1653
1654	wait->folio = folio;
1655	wait->bit_nr = PG_locked;
1656
1657	spin_lock_irq(&q->lock);
1658	__add_wait_queue_entry_tail(q, &wait->wait);
1659	folio_set_waiters(folio);
1660	ret = !folio_trylock(folio);
1661	/*
1662	 * If we were successful now, we know we're still on the
1663	 * waitqueue as we're still under the lock. This means it's
1664	 * safe to remove and return success: we know the callback
1665	 * isn't going to trigger.
1666	 */
1667	if (!ret)
1668		__remove_wait_queue(q, &wait->wait);
1669	else
1670		ret = -EIOCBQUEUED;
1671	spin_unlock_irq(&q->lock);
1672	return ret;
1673}
1674
1675/*
1676 * Return values:
1677 * 0 - folio is locked.
1678 * non-zero - folio is not locked.
1679 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
1680 *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
1681 *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
1682 *
1683 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
1684 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1685 */
1686vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1687{
1688	unsigned int flags = vmf->flags;
1689
1690	if (fault_flag_allow_retry_first(flags)) {
1691		/*
1692		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
1693		 * released even though returning VM_FAULT_RETRY.
1694		 */
1695		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1696			return VM_FAULT_RETRY;
1697
1698		release_fault_lock(vmf);
1699		if (flags & FAULT_FLAG_KILLABLE)
1700			folio_wait_locked_killable(folio);
1701		else
1702			folio_wait_locked(folio);
1703		return VM_FAULT_RETRY;
1704	}
1705	if (flags & FAULT_FLAG_KILLABLE) {
1706		bool ret;
1707
1708		ret = __folio_lock_killable(folio);
1709		if (ret) {
1710			release_fault_lock(vmf);
1711			return VM_FAULT_RETRY;
1712		}
1713	} else {
1714		__folio_lock(folio);
1715	}
1716
1717	return 0;
1718}
1719
1720/**
1721 * page_cache_next_miss() - Find the next gap in the page cache.
1722 * @mapping: Mapping.
1723 * @index: Index.
1724 * @max_scan: Maximum range to search.
1725 *
1726 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1727 * gap with the lowest index.
1728 *
1729 * This function may be called under the rcu_read_lock.  However, this will
1730 * not atomically search a snapshot of the cache at a single point in time.
1731 * For example, if a gap is created at index 5, then subsequently a gap is
1732 * created at index 10, page_cache_next_miss() covering both indices may
1733 * return 10 if called under the rcu_read_lock.
1734 *
1735 * Return: The index of the gap if found, otherwise an index outside the
1736 * range specified (in which case 'return - index >= max_scan' will be true).
1737 * In the rare case of index wrap-around, 0 will be returned.
1738 */
1739pgoff_t page_cache_next_miss(struct address_space *mapping,
1740			     pgoff_t index, unsigned long max_scan)
1741{
1742	XA_STATE(xas, &mapping->i_pages, index);
1743
1744	while (max_scan--) {
1745		void *entry = xas_next(&xas);
1746		if (!entry || xa_is_value(entry))
1747			return xas.xa_index;
1748		if (xas.xa_index == 0)
1749			return 0;
1750	}
1751
1752	return index + max_scan;
1753}
1754EXPORT_SYMBOL(page_cache_next_miss);
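
/*
 * Illustrative sketch, not part of this file: the documented return contract
 * above can be used to ask whether a whole range is already cached, e.g. to
 * decide if readahead is worthwhile.  example_range_cached() is hypothetical
 * and ignores the rare wrap-around case.
 */
static inline bool example_range_cached(struct address_space *mapping,
					pgoff_t index, unsigned long nr)
{
	/* No gap inside [index, index + nr - 1] means every page is present. */
	return page_cache_next_miss(mapping, index, nr) - index >= nr;
}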
1755
1756/**
1757 * page_cache_prev_miss() - Find the previous gap in the page cache.
1758 * @mapping: Mapping.
1759 * @index: Index.
1760 * @max_scan: Maximum range to search.
1761 *
1762 * Search the range [max(index - max_scan + 1, 0), index] for the
1763 * gap with the highest index.
1764 *
1765 * This function may be called under the rcu_read_lock.  However, this will
1766 * not atomically search a snapshot of the cache at a single point in time.
1767 * For example, if a gap is created at index 10, then subsequently a gap is
1768 * created at index 5, page_cache_prev_miss() covering both indices may
1769 * return 5 if called under the rcu_read_lock.
1770 *
1771 * Return: The index of the gap if found, otherwise an index outside the
1772 * range specified (in which case 'index - return >= max_scan' will be true).
1773 * In the rare case of wrap-around, ULONG_MAX will be returned.
1774 */
1775pgoff_t page_cache_prev_miss(struct address_space *mapping,
1776			     pgoff_t index, unsigned long max_scan)
1777{
1778	XA_STATE(xas, &mapping->i_pages, index);
1779
1780	while (max_scan--) {
1781		void *entry = xas_prev(&xas);
1782		if (!entry || xa_is_value(entry))
1783			break;
1784		if (xas.xa_index == ULONG_MAX)
1785			break;
1786	}
1787
1788	return xas.xa_index;
1789}
1790EXPORT_SYMBOL(page_cache_prev_miss);
1791
1792/*
1793 * Lockless page cache protocol:
1794 * On the lookup side:
1795 * 1. Load the folio from i_pages
1796 * 2. Increment the refcount if it's not zero
1797 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1798 *
1799 * On the removal side:
1800 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1801 * B. Remove the page from i_pages
1802 * C. Return the page to the page allocator
1803 *
1804 * This means that any page may have its reference count temporarily
1805 * increased by a speculative page cache (or GUP-fast) lookup as it can
1806 * be allocated by another user before the RCU grace period expires.
1807 * Because the refcount temporarily acquired here may end up being the
1808 * last refcount on the page, any page allocation must be freeable by
1809 * folio_put().
1810 */
1811
1812/*
1813 * filemap_get_entry - Get a page cache entry.
1814 * @mapping: the address_space to search
1815 * @index: The page cache index.
1816 *
1817 * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1818 * it is returned with an increased refcount.  If it is a shadow entry
1819 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1820 * it is returned without further action.
1821 *
1822 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1823 */
1824void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
1825{
1826	XA_STATE(xas, &mapping->i_pages, index);
1827	struct folio *folio;
1828
1829	rcu_read_lock();
1830repeat:
1831	xas_reset(&xas);
1832	folio = xas_load(&xas);
1833	if (xas_retry(&xas, folio))
1834		goto repeat;
1835	/*
1836	 * A shadow entry of a recently evicted page, or a swap entry from
1837	 * shmem/tmpfs.  Return it without attempting to raise page count.
1838	 */
1839	if (!folio || xa_is_value(folio))
1840		goto out;
1841
1842	if (!folio_try_get(folio))
1843		goto repeat;
1844
1845	if (unlikely(folio != xas_reload(&xas))) {
1846		folio_put(folio);
1847		goto repeat;
1848	}
1849out:
1850	rcu_read_unlock();
1851
1852	return folio;
1853}
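
/*
 * Illustrative sketch, not part of this file: a caller of filemap_get_entry()
 * has to distinguish the three possible results.  example_peek_folio() is a
 * hypothetical helper that only cares about real folios.
 */
static inline struct folio *example_peek_folio(struct address_space *mapping,
					       pgoff_t index)
{
	void *entry = filemap_get_entry(mapping, index);

	if (!entry)
		return NULL;		/* nothing cached at @index */
	if (xa_is_value(entry))
		return NULL;		/* shadow or swap entry, no refcount taken */
	return entry;			/* a folio with an elevated refcount */
}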
1854
1855/**
1856 * __filemap_get_folio - Find and get a reference to a folio.
1857 * @mapping: The address_space to search.
1858 * @index: The page index.
1859 * @fgp_flags: %FGP flags modify how the folio is returned.
1860 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1861 *
1862 * Looks up the page cache entry at @mapping & @index.
1863 *
1864 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1865 * if the %GFP flags specified for %FGP_CREAT are atomic.
1866 *
1867 * If this function returns a folio, it is returned with an increased refcount.
1868 *
1869 * Return: The found folio or an ERR_PTR() otherwise.
1870 */
1871struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1872		fgf_t fgp_flags, gfp_t gfp)
1873{
1874	struct folio *folio;
1875
1876repeat:
1877	folio = filemap_get_entry(mapping, index);
1878	if (xa_is_value(folio))
1879		folio = NULL;
1880	if (!folio)
1881		goto no_page;
1882
1883	if (fgp_flags & FGP_LOCK) {
1884		if (fgp_flags & FGP_NOWAIT) {
1885			if (!folio_trylock(folio)) {
1886				folio_put(folio);
1887				return ERR_PTR(-EAGAIN);
1888			}
1889		} else {
1890			folio_lock(folio);
1891		}
1892
1893		/* Has the page been truncated? */
1894		if (unlikely(folio->mapping != mapping)) {
1895			folio_unlock(folio);
1896			folio_put(folio);
1897			goto repeat;
1898		}
1899		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1900	}
1901
1902	if (fgp_flags & FGP_ACCESSED)
1903		folio_mark_accessed(folio);
1904	else if (fgp_flags & FGP_WRITE) {
1905		/* Clear idle flag for buffer write */
1906		if (folio_test_idle(folio))
1907			folio_clear_idle(folio);
1908	}
1909
1910	if (fgp_flags & FGP_STABLE)
1911		folio_wait_stable(folio);
1912no_page:
1913	if (!folio && (fgp_flags & FGP_CREAT)) {
1914		unsigned int min_order = mapping_min_folio_order(mapping);
1915		unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
1916		int err;
1917		index = mapping_align_index(mapping, index);
1918
1919		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1920			gfp |= __GFP_WRITE;
1921		if (fgp_flags & FGP_NOFS)
1922			gfp &= ~__GFP_FS;
1923		if (fgp_flags & FGP_NOWAIT) {
1924			gfp &= ~GFP_KERNEL;
1925			gfp |= GFP_NOWAIT | __GFP_NOWARN;
1926		}
1927		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1928			fgp_flags |= FGP_LOCK;
1929
1930		if (order > mapping_max_folio_order(mapping))
1931			order = mapping_max_folio_order(mapping);
1932		/* If we're not aligned, allocate a smaller folio */
1933		if (index & ((1UL << order) - 1))
1934			order = __ffs(index);
1935
1936		do {
1937			gfp_t alloc_gfp = gfp;
1938
1939			err = -ENOMEM;
1940			if (order > min_order)
1941				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
1942			folio = filemap_alloc_folio(alloc_gfp, order);
1943			if (!folio)
1944				continue;
1945
1946			/* Init accessed so we avoid an atomic mark_page_accessed() later */
1947			if (fgp_flags & FGP_ACCESSED)
1948				__folio_set_referenced(folio);
1949
1950			err = filemap_add_folio(mapping, folio, index, gfp);
1951			if (!err)
1952				break;
1953			folio_put(folio);
1954			folio = NULL;
1955		} while (order-- > min_order);
1956
1957		if (err == -EEXIST)
1958			goto repeat;
1959		if (err)
1960			return ERR_PTR(err);
1961		/*
1962		 * filemap_add_folio locks the page, and for mmap
1963		 * we expect an unlocked page.
1964		 */
1965		if (folio && (fgp_flags & FGP_FOR_MMAP))
1966			folio_unlock(folio);
1967	}
1968
1969	if (!folio)
1970		return ERR_PTR(-ENOENT);
1971	return folio;
1972}
1973EXPORT_SYMBOL(__filemap_get_folio);
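
/*
 * Illustrative sketch, not part of this file: how a hypothetical buffered
 * write path might look up or create a locked folio at @index.  The flag
 * combination and the name example_get_write_folio() are assumptions for
 * illustration.
 */
static inline struct folio *example_get_write_folio(struct address_space *mapping,
						    pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return folio;		/* e.g. ERR_PTR(-ENOMEM) */
	/* Returned locked and with an elevated refcount. */
	return folio;
}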
1974
1975static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1976		xa_mark_t mark)
1977{
1978	struct folio *folio;
1979
1980retry:
1981	if (mark == XA_PRESENT)
1982		folio = xas_find(xas, max);
1983	else
1984		folio = xas_find_marked(xas, max, mark);
1985
1986	if (xas_retry(xas, folio))
1987		goto retry;
1988	/*
1989	 * A shadow entry of a recently evicted page, a swap
1990	 * entry from shmem/tmpfs or a DAX entry.  Return it
1991	 * without attempting to raise page count.
1992	 */
1993	if (!folio || xa_is_value(folio))
1994		return folio;
1995
1996	if (!folio_try_get(folio))
1997		goto reset;
1998
1999	if (unlikely(folio != xas_reload(xas))) {
2000		folio_put(folio);
2001		goto reset;
2002	}
2003
2004	return folio;
2005reset:
2006	xas_reset(xas);
2007	goto retry;
2008}
2009
2010/**
2011 * find_get_entries - gang pagecache lookup
2012 * @mapping:	The address_space to search
2013 * @start:	The starting page cache index
2014 * @end:	The final page index (inclusive).
2015 * @fbatch:	Where the resulting entries are placed.
2016 * @indices:	The cache indices corresponding to the entries in @entries
2017 *
2018 * find_get_entries() will search for and return a batch of entries in
2019 * the mapping.  The entries are placed in @fbatch.  find_get_entries()
2020 * takes a reference on any actual folios it returns.
2021 *
2022 * The entries have ascending indexes.  The indices may not be consecutive
2023 * due to not-present entries or large folios.
2024 *
2025 * Any shadow entries of evicted folios, or swap entries from
2026 * shmem/tmpfs, are included in the returned array.
2027 *
2028 * Return: The number of entries which were found.
2029 */
2030unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2031		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2032{
2033	XA_STATE(xas, &mapping->i_pages, *start);
2034	struct folio *folio;
2035
2036	rcu_read_lock();
2037	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2038		indices[fbatch->nr] = xas.xa_index;
2039		if (!folio_batch_add(fbatch, folio))
2040			break;
2041	}
2042
2043	if (folio_batch_count(fbatch)) {
2044		unsigned long nr;
2045		int idx = folio_batch_count(fbatch) - 1;
2046
2047		folio = fbatch->folios[idx];
2048		if (!xa_is_value(folio))
2049			nr = folio_nr_pages(folio);
2050		else
2051			nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
2052		*start = round_down(indices[idx] + nr, nr);
2053	}
2054	rcu_read_unlock();
2055
2056	return folio_batch_count(fbatch);
2057}
2058
2059/**
2060 * find_lock_entries - Find a batch of pagecache entries.
2061 * @mapping:	The address_space to search.
2062 * @start:	The starting page cache index.
2063 * @end:	The final page index (inclusive).
2064 * @fbatch:	Where the resulting entries are placed.
2065 * @indices:	The cache indices of the entries in @fbatch.
2066 *
2067 * find_lock_entries() will return a batch of entries from @mapping.
2068 * Swap, shadow and DAX entries are included.  Folios are returned
2069 * locked and with an incremented refcount.  Folios which are locked
2070 * by somebody else or under writeback are skipped.  Folios which are
2071 * partially outside the range are not returned.
2072 *
2073 * The entries have ascending indexes.  The indices may not be consecutive
2074 * due to not-present entries, large folios, folios which could not be
2075 * locked or folios under writeback.
2076 *
2077 * Return: The number of entries which were found.
2078 */
2079unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2080		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2081{
2082	XA_STATE(xas, &mapping->i_pages, *start);
2083	struct folio *folio;
2084
2085	rcu_read_lock();
2086	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2087		unsigned long base;
2088		unsigned long nr;
2089
2090		if (!xa_is_value(folio)) {
2091			nr = folio_nr_pages(folio);
2092			base = folio->index;
2093			/* Omit large folio which begins before the start */
2094			if (base < *start)
2095				goto put;
2096			/* Omit large folio which extends beyond the end */
2097			if (base + nr - 1 > end)
2098				goto put;
2099			if (!folio_trylock(folio))
2100				goto put;
2101			if (folio->mapping != mapping ||
2102			    folio_test_writeback(folio))
2103				goto unlock;
2104			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2105					folio);
2106		} else {
2107			nr = 1 << xas_get_order(&xas);
2108			base = xas.xa_index & ~(nr - 1);
2109			/* Omit order>0 value which begins before the start */
2110			if (base < *start)
2111				continue;
2112			/* Omit order>0 value which extends beyond the end */
2113			if (base + nr - 1 > end)
2114				break;
2115		}
2116
2117		/* Update start now so that last update is correct on return */
2118		*start = base + nr;
2119		indices[fbatch->nr] = xas.xa_index;
2120		if (!folio_batch_add(fbatch, folio))
2121			break;
2122		continue;
2123unlock:
2124		folio_unlock(folio);
2125put:
2126		folio_put(folio);
2127	}
2128	rcu_read_unlock();
2129
2130	return folio_batch_count(fbatch);
2131}
2132
2133/**
2134 * filemap_get_folios - Get a batch of folios
2135 * @mapping:	The address_space to search
2136 * @start:	The starting page index
2137 * @end:	The final page index (inclusive)
2138 * @fbatch:	The batch to fill.
2139 *
2140 * Search for and return a batch of folios in the mapping starting at
2141 * index @start and up to index @end (inclusive).  The folios are returned
2142 * in @fbatch with an elevated reference count.
2143 *
2144 * Return: The number of folios which were found.
2145 * We also update @start to index the next folio for the traversal.
2146 */
2147unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2148		pgoff_t end, struct folio_batch *fbatch)
2149{
2150	return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
2151}
2152EXPORT_SYMBOL(filemap_get_folios);
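
/*
 * Illustrative sketch, not part of this file: the usual loop for walking
 * every folio in a range with filemap_get_folios(), here simply counting
 * cached pages (folios straddling the range boundaries are counted whole).
 * The batch only ever contains real folios, so folio_batch_release() is
 * enough to drop the references.  The helper name is hypothetical.
 */
static inline unsigned long example_count_cached_pages(struct address_space *mapping,
							pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned long pages = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			pages += folio_nr_pages(fbatch.folios[i]);
		folio_batch_release(&fbatch);
	}
	return pages;
}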
2153
2154/**
2155 * filemap_get_folios_contig - Get a batch of contiguous folios
2156 * @mapping:	The address_space to search
2157 * @start:	The starting page index
2158 * @end:	The final page index (inclusive)
2159 * @fbatch:	The batch to fill
2160 *
2161 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2162 * except the returned folios are guaranteed to be contiguous. This may
2163 * not return all contiguous folios if the batch gets filled up.
2164 *
2165 * Return: The number of folios found.
2166 * Also update @start to be positioned for traversal of the next folio.
2167 */
2168
2169unsigned filemap_get_folios_contig(struct address_space *mapping,
2170		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2171{
2172	XA_STATE(xas, &mapping->i_pages, *start);
2173	unsigned long nr;
2174	struct folio *folio;
2175
2176	rcu_read_lock();
2177
2178	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2179			folio = xas_next(&xas)) {
2180		if (xas_retry(&xas, folio))
2181			continue;
2182		/*
2183		 * If the entry has been swapped out, we can stop looking.
2184		 * No current caller is looking for DAX entries.
2185		 */
2186		if (xa_is_value(folio))
2187			goto update_start;
2188
2189		/* If we landed in the middle of a THP, continue at its end. */
2190		if (xa_is_sibling(folio))
2191			goto update_start;
2192
2193		if (!folio_try_get(folio))
2194			goto retry;
2195
2196		if (unlikely(folio != xas_reload(&xas)))
2197			goto put_folio;
2198
2199		if (!folio_batch_add(fbatch, folio)) {
2200			nr = folio_nr_pages(folio);
2201			*start = folio->index + nr;
2202			goto out;
2203		}
2204		continue;
2205put_folio:
2206		folio_put(folio);
2207
2208retry:
2209		xas_reset(&xas);
2210	}
2211
2212update_start:
2213	nr = folio_batch_count(fbatch);
2214
2215	if (nr) {
2216		folio = fbatch->folios[nr - 1];
2217		*start = folio_next_index(folio);
2218	}
2219out:
2220	rcu_read_unlock();
2221	return folio_batch_count(fbatch);
2222}
2223EXPORT_SYMBOL(filemap_get_folios_contig);
2224
2225/**
2226 * filemap_get_folios_tag - Get a batch of folios matching @tag
2227 * @mapping:    The address_space to search
2228 * @start:      The starting page index
2229 * @end:        The final page index (inclusive)
2230 * @tag:        The tag index
2231 * @fbatch:     The batch to fill
2232 *
2233 * The first folio may start before @start; if it does, it will contain
2234 * @start.  The final folio may extend beyond @end; if it does, it will
2235 * contain @end.  The folios have ascending indices.  There may be gaps
2236 * between the folios if there are indices which have no folio in the
2237 * page cache.  If folios are added to or removed from the page cache
2238 * while this is running, they may or may not be found by this call.
2239 * Only returns folios that are tagged with @tag.
2240 *
2241 * Return: The number of folios found.
2242 * Also update @start to index the next folio for traversal.
2243 */
2244unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
2245			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2246{
2247	XA_STATE(xas, &mapping->i_pages, *start);
2248	struct folio *folio;
2249
2250	rcu_read_lock();
2251	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2252		/*
2253		 * Shadow entries should never be tagged, but this iteration
2254		 * is lockless so there is a window for page reclaim to evict
2255		 * a page we saw tagged. Skip over it.
2256		 */
2257		if (xa_is_value(folio))
2258			continue;
2259		if (!folio_batch_add(fbatch, folio)) {
2260			unsigned long nr = folio_nr_pages(folio);
2261			*start = folio->index + nr;
2262			goto out;
2263		}
2264	}
2265	/*
2266	 * We come here when there is no page beyond @end. We take care to not
2267	 * overflow the index @start as it confuses some of the callers. This
2268	 * breaks the iteration when there is a page at index -1 but that is
2269	 * already broken anyway.
2270	 */
2271	if (end == (pgoff_t)-1)
2272		*start = (pgoff_t)-1;
2273	else
2274		*start = end + 1;
2275out:
2276	rcu_read_unlock();
2277
2278	return folio_batch_count(fbatch);
2279}
2280EXPORT_SYMBOL(filemap_get_folios_tag);
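
/*
 * Illustrative sketch, not part of this file: a writeback-style walk over
 * the folios tagged dirty in a range, which is the typical use of
 * filemap_get_folios_tag().  The per-folio work is elided and the helper
 * name is hypothetical.
 */
static inline void example_walk_dirty_folios(struct address_space *mapping,
					     pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &start, end,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... check folio->mapping, write the folio back ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
	}
}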
2281
2282/*
2283 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2284 * a _large_ part of the i/o request. Imagine the worst scenario:
2285 *
2286 *      ---R__________________________________________B__________
2287 *         ^ reading here                             ^ bad block(assume 4k)
2288 *
2289 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2290 * => failing the whole request => read(R) => read(R+1) =>
2291 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2292 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2293 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2294 *
2295 * It is going insane. Fix it by quickly scaling down the readahead size.
2296 */
2297static void shrink_readahead_size_eio(struct file_ra_state *ra)
2298{
2299	ra->ra_pages /= 4;
2300}
2301
2302/*
2303 * filemap_get_read_batch - Get a batch of folios for read
2304 *
2305 * Get a batch of folios which represent a contiguous range of bytes in
2306 * the file.  No exceptional entries will be returned.  If @index is in
2307 * the middle of a folio, the entire folio will be returned.  The last
2308 * folio in the batch may have the readahead flag set or the uptodate flag
2309 * clear so that the caller can take the appropriate action.
2310 */
2311static void filemap_get_read_batch(struct address_space *mapping,
2312		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2313{
2314	XA_STATE(xas, &mapping->i_pages, index);
2315	struct folio *folio;
2316
2317	rcu_read_lock();
2318	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2319		if (xas_retry(&xas, folio))
2320			continue;
2321		if (xas.xa_index > max || xa_is_value(folio))
2322			break;
2323		if (xa_is_sibling(folio))
2324			break;
2325		if (!folio_try_get(folio))
2326			goto retry;
2327
2328		if (unlikely(folio != xas_reload(&xas)))
2329			goto put_folio;
2330
2331		if (!folio_batch_add(fbatch, folio))
2332			break;
2333		if (!folio_test_uptodate(folio))
2334			break;
2335		if (folio_test_readahead(folio))
2336			break;
2337		xas_advance(&xas, folio_next_index(folio) - 1);
2338		continue;
2339put_folio:
2340		folio_put(folio);
2341retry:
2342		xas_reset(&xas);
2343	}
2344	rcu_read_unlock();
2345}
2346
2347static int filemap_read_folio(struct file *file, filler_t filler,
2348		struct folio *folio)
2349{
2350	bool workingset = folio_test_workingset(folio);
2351	unsigned long pflags;
2352	int error;
2353
2354	/* Start the actual read. The read will unlock the page. */
2355	if (unlikely(workingset))
2356		psi_memstall_enter(&pflags);
2357	error = filler(file, folio);
2358	if (unlikely(workingset))
2359		psi_memstall_leave(&pflags);
2360	if (error)
2361		return error;
2362
2363	error = folio_wait_locked_killable(folio);
2364	if (error)
2365		return error;
2366	if (folio_test_uptodate(folio))
2367		return 0;
2368	if (file)
2369		shrink_readahead_size_eio(&file->f_ra);
2370	return -EIO;
2371}
2372
2373static bool filemap_range_uptodate(struct address_space *mapping,
2374		loff_t pos, size_t count, struct folio *folio,
2375		bool need_uptodate)
2376{
2377	if (folio_test_uptodate(folio))
2378		return true;
2379	/* pipes can't handle partially uptodate pages */
2380	if (need_uptodate)
2381		return false;
2382	if (!mapping->a_ops->is_partially_uptodate)
2383		return false;
2384	if (mapping->host->i_blkbits >= folio_shift(folio))
2385		return false;
2386
2387	if (folio_pos(folio) > pos) {
2388		count -= folio_pos(folio) - pos;
2389		pos = 0;
2390	} else {
2391		pos -= folio_pos(folio);
2392	}
2393
2394	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2395}
2396
2397static int filemap_update_page(struct kiocb *iocb,
2398		struct address_space *mapping, size_t count,
2399		struct folio *folio, bool need_uptodate)
2400{
2401	int error;
2402
2403	if (iocb->ki_flags & IOCB_NOWAIT) {
2404		if (!filemap_invalidate_trylock_shared(mapping))
2405			return -EAGAIN;
2406	} else {
2407		filemap_invalidate_lock_shared(mapping);
2408	}
2409
2410	if (!folio_trylock(folio)) {
2411		error = -EAGAIN;
2412		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2413			goto unlock_mapping;
2414		if (!(iocb->ki_flags & IOCB_WAITQ)) {
2415			filemap_invalidate_unlock_shared(mapping);
2416			/*
2417			 * This is where we usually end up waiting for a
2418			 * previously submitted readahead to finish.
2419			 */
2420			folio_put_wait_locked(folio, TASK_KILLABLE);
2421			return AOP_TRUNCATED_PAGE;
2422		}
2423		error = __folio_lock_async(folio, iocb->ki_waitq);
2424		if (error)
2425			goto unlock_mapping;
2426	}
2427
2428	error = AOP_TRUNCATED_PAGE;
2429	if (!folio->mapping)
2430		goto unlock;
2431
2432	error = 0;
2433	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2434				   need_uptodate))
2435		goto unlock;
2436
2437	error = -EAGAIN;
2438	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2439		goto unlock;
2440
2441	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2442			folio);
2443	goto unlock_mapping;
2444unlock:
2445	folio_unlock(folio);
2446unlock_mapping:
2447	filemap_invalidate_unlock_shared(mapping);
2448	if (error == AOP_TRUNCATED_PAGE)
2449		folio_put(folio);
2450	return error;
2451}
2452
2453static int filemap_create_folio(struct file *file,
2454		struct address_space *mapping, loff_t pos,
2455		struct folio_batch *fbatch)
2456{
2457	struct folio *folio;
2458	int error;
2459	unsigned int min_order = mapping_min_folio_order(mapping);
2460	pgoff_t index;
2461
2462	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
2463	if (!folio)
2464		return -ENOMEM;
2465
2466	/*
2467	 * Protect against truncate / hole punch. Grabbing invalidate_lock
2468	 * here assures we cannot instantiate and bring uptodate new
2469	 * pagecache folios after evicting page cache during truncate
2470	 * and before actually freeing blocks.	Note that we could
2471	 * release invalidate_lock after inserting the folio into
2472	 * the page cache as the locked folio would then be enough to
2473	 * synchronize with hole punching. But there are code paths
2474	 * such as filemap_update_page() filling in partially uptodate
2475	 * pages or ->readahead() that need to hold invalidate_lock
2476	 * while mapping blocks for IO so let's hold the lock here as
2477	 * well to keep locking rules simple.
2478	 */
2479	filemap_invalidate_lock_shared(mapping);
2480	index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
2481	error = filemap_add_folio(mapping, folio, index,
2482			mapping_gfp_constraint(mapping, GFP_KERNEL));
2483	if (error == -EEXIST)
2484		error = AOP_TRUNCATED_PAGE;
2485	if (error)
2486		goto error;
2487
2488	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2489	if (error)
2490		goto error;
2491
2492	filemap_invalidate_unlock_shared(mapping);
2493	folio_batch_add(fbatch, folio);
2494	return 0;
2495error:
2496	filemap_invalidate_unlock_shared(mapping);
2497	folio_put(folio);
2498	return error;
2499}
2500
2501static int filemap_readahead(struct kiocb *iocb, struct file *file,
2502		struct address_space *mapping, struct folio *folio,
2503		pgoff_t last_index)
2504{
2505	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2506
2507	if (iocb->ki_flags & IOCB_NOIO)
2508		return -EAGAIN;
2509	page_cache_async_ra(&ractl, folio, last_index - folio->index);
2510	return 0;
2511}
2512
2513static int filemap_get_pages(struct kiocb *iocb, size_t count,
2514		struct folio_batch *fbatch, bool need_uptodate)
2515{
2516	struct file *filp = iocb->ki_filp;
2517	struct address_space *mapping = filp->f_mapping;
2518	struct file_ra_state *ra = &filp->f_ra;
2519	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2520	pgoff_t last_index;
2521	struct folio *folio;
2522	unsigned int flags;
2523	int err = 0;
2524
2525	/* "last_index" is the index of the page beyond the end of the read */
2526	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
2527retry:
2528	if (fatal_signal_pending(current))
2529		return -EINTR;
2530
2531	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2532	if (!folio_batch_count(fbatch)) {
2533		if (iocb->ki_flags & IOCB_NOIO)
2534			return -EAGAIN;
2535		if (iocb->ki_flags & IOCB_NOWAIT)
2536			flags = memalloc_noio_save();
2537		page_cache_sync_readahead(mapping, ra, filp, index,
2538				last_index - index);
2539		if (iocb->ki_flags & IOCB_NOWAIT)
2540			memalloc_noio_restore(flags);
2541		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2542	}
2543	if (!folio_batch_count(fbatch)) {
2544		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2545			return -EAGAIN;
2546		err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
2547		if (err == AOP_TRUNCATED_PAGE)
2548			goto retry;
2549		return err;
2550	}
2551
2552	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2553	if (folio_test_readahead(folio)) {
2554		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2555		if (err)
2556			goto err;
2557	}
2558	if (!folio_test_uptodate(folio)) {
2559		if ((iocb->ki_flags & IOCB_WAITQ) &&
2560		    folio_batch_count(fbatch) > 1)
2561			iocb->ki_flags |= IOCB_NOWAIT;
2562		err = filemap_update_page(iocb, mapping, count, folio,
2563					  need_uptodate);
2564		if (err)
2565			goto err;
2566	}
2567
2568	trace_mm_filemap_get_pages(mapping, index, last_index - 1);
2569	return 0;
2570err:
2571	if (err < 0)
2572		folio_put(folio);
2573	if (likely(--fbatch->nr))
2574		return 0;
2575	if (err == AOP_TRUNCATED_PAGE)
2576		goto retry;
2577	return err;
2578}
2579
2580static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2581{
2582	unsigned int shift = folio_shift(folio);
2583
2584	return (pos1 >> shift == pos2 >> shift);
2585}
2586
2587/**
2588 * filemap_read - Read data from the page cache.
2589 * @iocb: The iocb to read.
2590 * @iter: Destination for the data.
2591 * @already_read: Number of bytes already read by the caller.
2592 *
2593 * Copies data from the page cache.  If the data is not currently present,
2594 * uses the readahead and read_folio address_space operations to fetch it.
2595 *
2596 * Return: Total number of bytes copied, including those already read by
2597 * the caller.  If an error happens before any bytes are copied, returns
2598 * a negative error number.
2599 */
2600ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2601		ssize_t already_read)
2602{
2603	struct file *filp = iocb->ki_filp;
2604	struct file_ra_state *ra = &filp->f_ra;
2605	struct address_space *mapping = filp->f_mapping;
2606	struct inode *inode = mapping->host;
2607	struct folio_batch fbatch;
2608	int i, error = 0;
2609	bool writably_mapped;
2610	loff_t isize, end_offset;
2611	loff_t last_pos = ra->prev_pos;
2612
2613	if (unlikely(iocb->ki_pos < 0))
2614		return -EINVAL;
2615	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2616		return 0;
2617	if (unlikely(!iov_iter_count(iter)))
2618		return 0;
2619
2620	iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
2621	folio_batch_init(&fbatch);
2622
2623	do {
2624		cond_resched();
2625
2626		/*
2627		 * If we've already successfully copied some data, then we
2628		 * can no longer safely return -EIOCBQUEUED. Hence mark
2629		 * an async read NOWAIT at that point.
2630		 */
2631		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2632			iocb->ki_flags |= IOCB_NOWAIT;
2633
2634		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2635			break;
2636
2637		error = filemap_get_pages(iocb, iter->count, &fbatch, false);
2638		if (error < 0)
2639			break;
2640
2641		/*
2642		 * i_size must be checked after we know the pages are Uptodate.
2643		 *
2644		 * Checking i_size after the check allows us to calculate
2645		 * the correct value for "nr", which means the zero-filled
2646		 * part of the page is not copied back to userspace (unless
2647		 * another truncate extends the file - this is desired though).
2648		 */
2649		isize = i_size_read(inode);
2650		if (unlikely(iocb->ki_pos >= isize))
2651			goto put_folios;
2652		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2653
2654		/*
2655		 * Once we start copying data, we don't want to be touching any
2656		 * cachelines that might be contended:
2657		 */
2658		writably_mapped = mapping_writably_mapped(mapping);
2659
2660		/*
2661		 * When a read accesses the same folio several times, only
2662		 * mark it as accessed the first time.
2663		 */
2664		if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
2665				    fbatch.folios[0]))
2666			folio_mark_accessed(fbatch.folios[0]);
2667
2668		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2669			struct folio *folio = fbatch.folios[i];
2670			size_t fsize = folio_size(folio);
2671			size_t offset = iocb->ki_pos & (fsize - 1);
2672			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2673					     fsize - offset);
2674			size_t copied;
2675
2676			if (end_offset < folio_pos(folio))
2677				break;
2678			if (i > 0)
2679				folio_mark_accessed(folio);
2680			/*
2681			 * If users can be writing to this folio using arbitrary
2682			 * virtual addresses, take care of potential aliasing
2683			 * before reading the folio on the kernel side.
2684			 */
2685			if (writably_mapped)
2686				flush_dcache_folio(folio);
2687
2688			copied = copy_folio_to_iter(folio, offset, bytes, iter);
2689
2690			already_read += copied;
2691			iocb->ki_pos += copied;
2692			last_pos = iocb->ki_pos;
2693
2694			if (copied < bytes) {
2695				error = -EFAULT;
2696				break;
2697			}
2698		}
2699put_folios:
2700		for (i = 0; i < folio_batch_count(&fbatch); i++)
2701			folio_put(fbatch.folios[i]);
2702		folio_batch_init(&fbatch);
2703	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2704
2705	file_accessed(filp);
2706	ra->prev_pos = last_pos;
2707	return already_read ? already_read : error;
2708}
2709EXPORT_SYMBOL_GPL(filemap_read);
2710
2711int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
2712{
2713	struct address_space *mapping = iocb->ki_filp->f_mapping;
2714	loff_t pos = iocb->ki_pos;
2715	loff_t end = pos + count - 1;
2716
2717	if (iocb->ki_flags & IOCB_NOWAIT) {
2718		if (filemap_range_needs_writeback(mapping, pos, end))
2719			return -EAGAIN;
2720		return 0;
2721	}
2722
2723	return filemap_write_and_wait_range(mapping, pos, end);
2724}
2725EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
2726
2727int filemap_invalidate_pages(struct address_space *mapping,
2728			     loff_t pos, loff_t end, bool nowait)
2729{
2730	int ret;
2731
2732	if (nowait) {
2733		/* we could block if there are any pages in the range */
2734		if (filemap_range_has_page(mapping, pos, end))
2735			return -EAGAIN;
2736	} else {
2737		ret = filemap_write_and_wait_range(mapping, pos, end);
2738		if (ret)
2739			return ret;
2740	}
2741
2742	/*
2743	 * After a write we want buffered reads to be sure to go to disk to get
2744	 * the new data.  We invalidate clean cached pages from the region we're
2745	 * about to write.  We do this *before* the write so that we can return
2746	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2747	 */
2748	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
2749					     end >> PAGE_SHIFT);
2750}
2751
2752int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
2753{
2754	struct address_space *mapping = iocb->ki_filp->f_mapping;
2755
2756	return filemap_invalidate_pages(mapping, iocb->ki_pos,
2757					iocb->ki_pos + count - 1,
2758					iocb->ki_flags & IOCB_NOWAIT);
2759}
2760EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
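
/*
 * Illustrative sketch, not part of this file: the cache-coherency prologue a
 * hypothetical direct-I/O ->write_iter() might run before submitting its I/O,
 * so that stale clean pages cannot be read back afterwards.  The helper name
 * is an assumption.
 */
static inline int example_dio_write_prologue(struct kiocb *iocb,
					     struct iov_iter *from)
{
	/*
	 * Flushes any dirty pagecache over the range and invalidates clean
	 * pages; returns -EAGAIN rather than blocking if IOCB_NOWAIT is set.
	 */
	return kiocb_invalidate_pages(iocb, iov_iter_count(from));
}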
2761
2762/**
2763 * generic_file_read_iter - generic filesystem read routine
2764 * @iocb:	kernel I/O control block
2765 * @iter:	destination for the data read
2766 *
2767 * This is the "read_iter()" routine for all filesystems
2768 * that can use the page cache directly.
2769 *
2770 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2771 * be returned when no data can be read without waiting for I/O requests
2772 * to complete; it doesn't prevent readahead.
2773 *
2774 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2775 * requests shall be made for the read or for readahead.  When no data
2776 * can be read, -EAGAIN shall be returned.  When readahead would be
2777 * triggered, a partial, possibly empty read shall be returned.
2778 *
2779 * Return:
2780 * * number of bytes copied, even for partial reads
2781 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2782 */
2783ssize_t
2784generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2785{
2786	size_t count = iov_iter_count(iter);
2787	ssize_t retval = 0;
2788
2789	if (!count)
2790		return 0; /* skip atime */
2791
2792	if (iocb->ki_flags & IOCB_DIRECT) {
2793		struct file *file = iocb->ki_filp;
2794		struct address_space *mapping = file->f_mapping;
2795		struct inode *inode = mapping->host;
2796
2797		retval = kiocb_write_and_wait(iocb, count);
2798		if (retval < 0)
2799			return retval;
2800		file_accessed(file);
2801
2802		retval = mapping->a_ops->direct_IO(iocb, iter);
2803		if (retval >= 0) {
2804			iocb->ki_pos += retval;
2805			count -= retval;
2806		}
2807		if (retval != -EIOCBQUEUED)
2808			iov_iter_revert(iter, count - iov_iter_count(iter));
2809
2810		/*
2811		 * Btrfs can have a short DIO read if we encounter
2812		 * compressed extents, so if there was an error, or if
2813		 * we've already read everything we wanted to, or if
2814		 * there was a short read because we hit EOF, go ahead
2815		 * and return.  Otherwise fall through to buffered I/O for
2816		 * the rest of the read.  Buffered reads will not work for
2817		 * DAX files, so don't bother trying.
2818		 */
2819		if (retval < 0 || !count || IS_DAX(inode))
2820			return retval;
2821		if (iocb->ki_pos >= i_size_read(inode))
2822			return retval;
2823	}
2824
2825	return filemap_read(iocb, iter, retval);
2826}
2827EXPORT_SYMBOL(generic_file_read_iter);
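
/*
 * Illustrative sketch, not part of this file: the usual way a simple
 * filesystem plugs the generic read paths into its file_operations.  The
 * structure name is hypothetical and other methods (mmap, write_iter, ...)
 * are omitted.
 */
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.splice_read	= filemap_splice_read,
};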
2828
2829/*
2830 * Splice subpages from a folio into a pipe.
2831 */
2832size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
2833			      struct folio *folio, loff_t fpos, size_t size)
2834{
2835	struct page *page;
2836	size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2837
2838	page = folio_page(folio, offset / PAGE_SIZE);
2839	size = min(size, folio_size(folio) - offset);
2840	offset %= PAGE_SIZE;
2841
2842	while (spliced < size &&
2843	       !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2844		struct pipe_buffer *buf = pipe_head_buf(pipe);
2845		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
2846
2847		*buf = (struct pipe_buffer) {
2848			.ops	= &page_cache_pipe_buf_ops,
2849			.page	= page,
2850			.offset	= offset,
2851			.len	= part,
2852		};
2853		folio_get(folio);
2854		pipe->head++;
2855		page++;
2856		spliced += part;
2857		offset = 0;
2858	}
2859
2860	return spliced;
2861}
2862
2863/**
2864 * filemap_splice_read -  Splice data from a file's pagecache into a pipe
2865 * @in: The file to read from
2866 * @ppos: Pointer to the file position to read from
2867 * @pipe: The pipe to splice into
2868 * @len: The amount to splice
2869 * @flags: The SPLICE_F_* flags
2870 *
2871 * This function gets folios from a file's pagecache and splices them into the
2872 * pipe.  Readahead will be called as necessary to fill more folios.  This may
2873 * be used for blockdevs also.
2874 *
2875 * Return: On success, the number of bytes read will be returned and *@ppos
2876 * will be updated if appropriate; 0 will be returned if there is no more data
2877 * to be read; -EAGAIN will be returned if the pipe had no space, and some
2878 * other negative error code will be returned on error.  A short read may occur
2879 * if the pipe has insufficient space, we reach the end of the data or we hit a
2880 * hole.
2881 */
2882ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
2883			    struct pipe_inode_info *pipe,
2884			    size_t len, unsigned int flags)
2885{
2886	struct folio_batch fbatch;
2887	struct kiocb iocb;
2888	size_t total_spliced = 0, used, npages;
2889	loff_t isize, end_offset;
2890	bool writably_mapped;
2891	int i, error = 0;
2892
2893	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
2894		return 0;
2895
2896	init_sync_kiocb(&iocb, in);
2897	iocb.ki_pos = *ppos;
2898
2899	/* Work out how much data we can actually add into the pipe */
2900	used = pipe_occupancy(pipe->head, pipe->tail);
2901	npages = max_t(ssize_t, pipe->max_usage - used, 0);
2902	len = min_t(size_t, len, npages * PAGE_SIZE);
2903
2904	folio_batch_init(&fbatch);
2905
2906	do {
2907		cond_resched();
2908
2909		if (*ppos >= i_size_read(in->f_mapping->host))
2910			break;
2911
2912		iocb.ki_pos = *ppos;
2913		error = filemap_get_pages(&iocb, len, &fbatch, true);
2914		if (error < 0)
2915			break;
2916
2917		/*
2918		 * i_size must be checked after we know the pages are Uptodate.
2919		 *
2920		 * Checking i_size after the uptodate check allows us to calculate
2921		 * the correct value for "n", which means the zero-filled
2922		 * part of the page is not copied back to userspace (unless
2923		 * another truncate extends the file - this is desired though).
2924		 */
2925		isize = i_size_read(in->f_mapping->host);
2926		if (unlikely(*ppos >= isize))
2927			break;
2928		end_offset = min_t(loff_t, isize, *ppos + len);
2929
2930		/*
2931		 * Once we start copying data, we don't want to be touching any
2932		 * cachelines that might be contended:
2933		 */
2934		writably_mapped = mapping_writably_mapped(in->f_mapping);
2935
2936		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2937			struct folio *folio = fbatch.folios[i];
2938			size_t n;
2939
2940			if (folio_pos(folio) >= end_offset)
2941				goto out;
2942			folio_mark_accessed(folio);
2943
2944			/*
2945			 * If users can be writing to this folio using arbitrary
2946			 * virtual addresses, take care of potential aliasing
2947			 * before reading the folio on the kernel side.
2948			 */
2949			if (writably_mapped)
2950				flush_dcache_folio(folio);
2951
2952			n = min_t(loff_t, len, isize - *ppos);
2953			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2954			if (!n)
2955				goto out;
2956			len -= n;
2957			total_spliced += n;
2958			*ppos += n;
2959			in->f_ra.prev_pos = *ppos;
2960			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2961				goto out;
2962		}
2963
2964		folio_batch_release(&fbatch);
2965	} while (len);
2966
2967out:
2968	folio_batch_release(&fbatch);
2969	file_accessed(in);
2970
2971	return total_spliced ? total_spliced : error;
2972}
2973EXPORT_SYMBOL(filemap_splice_read);
2974
2975static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2976		struct address_space *mapping, struct folio *folio,
2977		loff_t start, loff_t end, bool seek_data)
2978{
2979	const struct address_space_operations *ops = mapping->a_ops;
2980	size_t offset, bsz = i_blocksize(mapping->host);
2981
2982	if (xa_is_value(folio) || folio_test_uptodate(folio))
2983		return seek_data ? start : end;
2984	if (!ops->is_partially_uptodate)
2985		return seek_data ? end : start;
2986
2987	xas_pause(xas);
2988	rcu_read_unlock();
2989	folio_lock(folio);
2990	if (unlikely(folio->mapping != mapping))
2991		goto unlock;
2992
2993	offset = offset_in_folio(folio, start) & ~(bsz - 1);
2994
2995	do {
2996		if (ops->is_partially_uptodate(folio, offset, bsz) ==
2997							seek_data)
2998			break;
2999		start = (start + bsz) & ~((u64)bsz - 1);
3000		offset += bsz;
3001	} while (offset < folio_size(folio));
3002unlock:
3003	folio_unlock(folio);
3004	rcu_read_lock();
3005	return start;
3006}
3007
3008static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3009{
3010	if (xa_is_value(folio))
3011		return PAGE_SIZE << xas_get_order(xas);
3012	return folio_size(folio);
3013}
3014
3015/**
3016 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3017 * @mapping: Address space to search.
3018 * @start: First byte to consider.
3019 * @end: Limit of search (exclusive).
3020 * @whence: Either SEEK_HOLE or SEEK_DATA.
3021 *
3022 * If the page cache knows which blocks contain holes and which blocks
3023 * contain data, your filesystem can use this function to implement
3024 * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
3025 * entirely memory-based such as tmpfs, and filesystems which support
3026 * unwritten extents.
3027 *
3028 * Return: The requested offset on success, or -ENXIO if @whence specifies
3029 * SEEK_DATA and there is no data after @start.  There is an implicit hole
3030 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3031 * and @end contain data.
3032 */
3033loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
3034		loff_t end, int whence)
3035{
3036	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3037	pgoff_t max = (end - 1) >> PAGE_SHIFT;
3038	bool seek_data = (whence == SEEK_DATA);
3039	struct folio *folio;
3040
3041	if (end <= start)
3042		return -ENXIO;
3043
3044	rcu_read_lock();
3045	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3046		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3047		size_t seek_size;
3048
3049		if (start < pos) {
3050			if (!seek_data)
3051				goto unlock;
3052			start = pos;
3053		}
3054
3055		seek_size = seek_folio_size(&xas, folio);
3056		pos = round_up((u64)pos + 1, seek_size);
3057		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3058				seek_data);
3059		if (start < pos)
3060			goto unlock;
3061		if (start >= end)
3062			break;
3063		if (seek_size > PAGE_SIZE)
3064			xas_set(&xas, pos >> PAGE_SHIFT);
3065		if (!xa_is_value(folio))
3066			folio_put(folio);
3067	}
3068	if (seek_data)
3069		start = -ENXIO;
3070unlock:
3071	rcu_read_unlock();
3072	if (folio && !xa_is_value(folio))
3073		folio_put(folio);
3074	if (start > end)
3075		return end;
3076	return start;
3077}
3078
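/*
 * Illustrative sketch, not part of this file: a filesystem whose page cache
 * fully describes which blocks hold data (tmpfs-like, or one that tracks
 * unwritten extents) might implement ->llseek for SEEK_HOLE/SEEK_DATA on
 * top of mapping_seek_hole_data() roughly as below.  "examplefs_llseek" is
 * a hypothetical helper; the locking needed depends on the filesystem.
 */
#if 0
static loff_t examplefs_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek(file, offset, whence);
	if (offset < 0)
		return -ENXIO;

	inode_lock_shared(inode);
	/* There is an implicit hole at i_size; -ENXIO is returned past it. */
	offset = mapping_seek_hole_data(mapping, offset, i_size_read(inode),
					whence);
	if (offset >= 0)
		offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	inode_unlock_shared(inode);
	return offset;
}
#endif
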
3079#ifdef CONFIG_MMU
3080#define MMAP_LOTSAMISS  (100)
3081/*
3082 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
3083 * @vmf - the vm_fault for this fault.
3084 * @folio - the folio to lock.
3085 * @fpin - the pointer to the file we may pin (or is already pinned).
3086 *
3087 * This works similarly to lock_folio_or_retry in that it can drop the
3088 * mmap_lock.  It differs in that it returns 1 with the folio locked,
3089 * or 0 if it couldn't lock the folio.  If we did have
3090 * to drop the mmap_lock then fpin will point to the pinned file and
3091 * needs to be fput()'ed at a later point.
3092 */
3093static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3094				     struct file **fpin)
3095{
3096	if (folio_trylock(folio))
3097		return 1;
3098
3099	/*
3100	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
3101	 * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
3102	 * is supposed to work. We have way too many special cases..
3103	 */
3104	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3105		return 0;
3106
3107	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3108	if (vmf->flags & FAULT_FLAG_KILLABLE) {
3109		if (__folio_lock_killable(folio)) {
3110			/*
3111			 * We didn't have the right flags to drop the
3112			 * fault lock, but all fault handlers only check
3113			 * for fatal signals if we return VM_FAULT_RETRY,
3114			 * so we need to drop the fault lock here and
3115			 * return 0 if we don't have a fpin.
3116			 */
3117			if (*fpin == NULL)
3118				release_fault_lock(vmf);
3119			return 0;
3120		}
3121	} else
3122		__folio_lock(folio);
3123
3124	return 1;
3125}
3126
3127/*
3128 * Synchronous readahead happens when we don't even find a page in the page
3129 * cache at all.  We don't want to perform IO under the mmap lock, so if we
3130 * have to drop it we pin and return the file, which keeps it alive while the
3131 * IO is done.  If we didn't pin a file then we return NULL.  The file that is
3132 * returned needs to be fput()'ed when we're done with it.
3133 */
3134static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3135{
3136	struct file *file = vmf->vma->vm_file;
3137	struct file_ra_state *ra = &file->f_ra;
3138	struct address_space *mapping = file->f_mapping;
3139	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3140	struct file *fpin = NULL;
3141	unsigned long vm_flags = vmf->vma->vm_flags;
3142	unsigned int mmap_miss;
3143
3144#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3145	/* Use the readahead code, even if readahead is disabled */
3146	if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
3147		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3148		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3149		ra->size = HPAGE_PMD_NR;
3150		/*
3151		 * Fetch two PMD folios, so we get the chance to actually
3152		 * readahead, unless we've been told not to.
3153		 */
3154		if (!(vm_flags & VM_RAND_READ))
3155			ra->size *= 2;
3156		ra->async_size = HPAGE_PMD_NR;
3157		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3158		return fpin;
3159	}
3160#endif
3161
3162	/* If we don't want any read-ahead, don't bother */
3163	if (vm_flags & VM_RAND_READ)
3164		return fpin;
3165	if (!ra->ra_pages)
3166		return fpin;
3167
3168	if (vm_flags & VM_SEQ_READ) {
3169		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3170		page_cache_sync_ra(&ractl, ra->ra_pages);
3171		return fpin;
3172	}
3173
3174	/* Avoid banging the cache line if not needed */
3175	mmap_miss = READ_ONCE(ra->mmap_miss);
3176	if (mmap_miss < MMAP_LOTSAMISS * 10)
3177		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3178
3179	/*
3180	 * Do we miss much more than hit in this file? If so,
3181	 * stop bothering with read-ahead. It will only hurt.
3182	 */
3183	if (mmap_miss > MMAP_LOTSAMISS)
3184		return fpin;
3185
3186	/*
3187	 * mmap read-around
3188	 */
3189	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3190	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3191	ra->size = ra->ra_pages;
3192	ra->async_size = ra->ra_pages / 4;
3193	ractl._index = ra->start;
3194	page_cache_ra_order(&ractl, ra, 0);
3195	return fpin;
3196}
3197
3198/*
3199 * Asynchronous readahead happens when we find the page and PG_readahead,
3200 * so we want to possibly extend the readahead further.  We return the file that
3201 * was pinned if we have to drop the mmap_lock in order to do IO.
3202 */
3203static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3204					    struct folio *folio)
3205{
3206	struct file *file = vmf->vma->vm_file;
3207	struct file_ra_state *ra = &file->f_ra;
3208	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3209	struct file *fpin = NULL;
3210	unsigned int mmap_miss;
3211
3212	/* If we don't want any read-ahead, don't bother */
3213	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3214		return fpin;
3215
3216	mmap_miss = READ_ONCE(ra->mmap_miss);
3217	if (mmap_miss)
3218		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3219
3220	if (folio_test_readahead(folio)) {
3221		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3222		page_cache_async_ra(&ractl, folio, ra->ra_pages);
3223	}
3224	return fpin;
3225}
3226
3227static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
3228{
3229	struct vm_area_struct *vma = vmf->vma;
3230	vm_fault_t ret = 0;
3231	pte_t *ptep;
3232
3233	/*
3234	 * We might have COW'ed a pagecache folio and might now have an mlocked
3235	 * anon folio mapped. The original pagecache folio is not mlocked and
3236	 * might have been evicted. During a read+clear/modify/write update of
3237	 * the PTE, such as done in do_numa_page()/change_pte_range(), we
3238	 * temporarily clear the PTE under PT lock and might detect it here as
3239	 * "none" when not holding the PT lock.
3240	 *
3241	 * Not rechecking the PTE under PT lock could result in an unexpected
3242	 * major fault in an mlock'ed region. Recheck only for this special
3243	 * scenario while holding the PT lock, to not degrade non-mlocked
3244	 * scenarios. Recheck the PTE without PT lock firstly, thereby reducing
3245	 * the number of times we hold PT lock.
3246	 */
3247	if (!(vma->vm_flags & VM_LOCKED))
3248		return 0;
3249
3250	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
3251		return 0;
3252
3253	ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,
3254					&vmf->ptl);
3255	if (unlikely(!ptep))
3256		return VM_FAULT_NOPAGE;
3257
3258	if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {
3259		ret = VM_FAULT_NOPAGE;
3260	} else {
3261		spin_lock(vmf->ptl);
3262		if (unlikely(!pte_none(ptep_get(ptep))))
3263			ret = VM_FAULT_NOPAGE;
3264		spin_unlock(vmf->ptl);
3265	}
3266	pte_unmap(ptep);
3267	return ret;
3268}
3269
3270/**
3271 * filemap_fault - read in file data for page fault handling
3272 * @vmf:	struct vm_fault containing details of the fault
3273 *
3274 * filemap_fault() is invoked via the vma operations vector for a
3275 * mapped memory region to read in file data during a page fault.
3276 *
3277 * The gotos are kind of ugly, but this streamlines the normal case of having
3278 * it in the page cache, and handles the special cases reasonably without
3279 * having a lot of duplicated code.
3280 *
3281 * vma->vm_mm->mmap_lock must be held on entry.
3282 *
3283 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3284 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3285 *
3286 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3287 * has not been released.
3288 *
3289 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3290 *
3291 * Return: bitwise-OR of %VM_FAULT_ codes.
3292 */
3293vm_fault_t filemap_fault(struct vm_fault *vmf)
3294{
3295	int error;
3296	struct file *file = vmf->vma->vm_file;
3297	struct file *fpin = NULL;
3298	struct address_space *mapping = file->f_mapping;
3299	struct inode *inode = mapping->host;
3300	pgoff_t max_idx, index = vmf->pgoff;
3301	struct folio *folio;
3302	vm_fault_t ret = 0;
3303	bool mapping_locked = false;
3304
3305	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3306	if (unlikely(index >= max_idx))
3307		return VM_FAULT_SIGBUS;
3308
3309	trace_mm_filemap_fault(mapping, index);
3310
3311	/*
3312	 * Do we have something in the page cache already?
3313	 */
3314	folio = filemap_get_folio(mapping, index);
3315	if (likely(!IS_ERR(folio))) {
3316		/*
3317		 * We found the page, so try async readahead before waiting for
3318		 * the lock.
3319		 */
3320		if (!(vmf->flags & FAULT_FLAG_TRIED))
3321			fpin = do_async_mmap_readahead(vmf, folio);
3322		if (unlikely(!folio_test_uptodate(folio))) {
3323			filemap_invalidate_lock_shared(mapping);
3324			mapping_locked = true;
3325		}
3326	} else {
3327		ret = filemap_fault_recheck_pte_none(vmf);
3328		if (unlikely(ret))
3329			return ret;
3330
3331		/* No page in the page cache at all */
3332		count_vm_event(PGMAJFAULT);
3333		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3334		ret = VM_FAULT_MAJOR;
3335		fpin = do_sync_mmap_readahead(vmf);
3336retry_find:
3337		/*
3338		 * See comment in filemap_create_folio() why we need
3339		 * invalidate_lock
3340		 */
3341		if (!mapping_locked) {
3342			filemap_invalidate_lock_shared(mapping);
3343			mapping_locked = true;
3344		}
3345		folio = __filemap_get_folio(mapping, index,
3346					  FGP_CREAT|FGP_FOR_MMAP,
3347					  vmf->gfp_mask);
3348		if (IS_ERR(folio)) {
3349			if (fpin)
3350				goto out_retry;
3351			filemap_invalidate_unlock_shared(mapping);
3352			return VM_FAULT_OOM;
3353		}
3354	}
3355
3356	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3357		goto out_retry;
3358
3359	/* Did it get truncated? */
3360	if (unlikely(folio->mapping != mapping)) {
3361		folio_unlock(folio);
3362		folio_put(folio);
3363		goto retry_find;
3364	}
3365	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3366
3367	/*
3368	 * We have a locked folio in the page cache, now we need to check
3369	 * that it's up-to-date. If not, it is going to be due to an error,
3370	 * or because readahead was otherwise unable to retrieve it.
3371	 */
3372	if (unlikely(!folio_test_uptodate(folio))) {
3373		/*
3374		 * If the invalidate lock is not held, the folio was in cache
3375		 * and uptodate and now it is not. Strange but possible since we
3376		 * didn't hold the page lock all the time. Let's drop
3377		 * everything, get the invalidate lock and try again.
3378		 */
3379		if (!mapping_locked) {
3380			folio_unlock(folio);
3381			folio_put(folio);
3382			goto retry_find;
3383		}
3384
3385		/*
3386		 * OK, the folio is really not uptodate. This can be because the
3387		 * VMA has the VM_RAND_READ flag set, or because an error
3388		 * arose. Let's read it in directly.
3389		 */
3390		goto page_not_uptodate;
3391	}
3392
3393	/*
3394	 * We've made it this far and we had to drop our mmap_lock, now is the
3395	 * time to return to the upper layer and have it re-find the vma and
3396	 * redo the fault.
3397	 */
3398	if (fpin) {
3399		folio_unlock(folio);
3400		goto out_retry;
3401	}
3402	if (mapping_locked)
3403		filemap_invalidate_unlock_shared(mapping);
3404
3405	/*
3406	 * Found the page and have a reference on it.
3407	 * We must recheck i_size under page lock.
3408	 */
3409	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3410	if (unlikely(index >= max_idx)) {
3411		folio_unlock(folio);
3412		folio_put(folio);
3413		return VM_FAULT_SIGBUS;
3414	}
3415
3416	vmf->page = folio_file_page(folio, index);
3417	return ret | VM_FAULT_LOCKED;
3418
3419page_not_uptodate:
3420	/*
3421	 * Umm, take care of errors if the page isn't up-to-date.
3422	 * Try to re-read it _once_. We do this synchronously,
3423	 * because there really aren't any performance issues here
3424	 * and we need to check for errors.
3425	 */
3426	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3427	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3428	if (fpin)
3429		goto out_retry;
3430	folio_put(folio);
3431
3432	if (!error || error == AOP_TRUNCATED_PAGE)
3433		goto retry_find;
3434	filemap_invalidate_unlock_shared(mapping);
3435
3436	return VM_FAULT_SIGBUS;
3437
3438out_retry:
3439	/*
3440	 * We dropped the mmap_lock, we need to return to the fault handler to
3441	 * re-find the vma and come back and find our hopefully still populated
3442	 * page.
3443	 */
3444	if (!IS_ERR(folio))
3445		folio_put(folio);
3446	if (mapping_locked)
3447		filemap_invalidate_unlock_shared(mapping);
3448	if (fpin)
3449		fput(fpin);
3450	return ret | VM_FAULT_RETRY;
3451}
3452EXPORT_SYMBOL(filemap_fault);
3453
3454static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3455		pgoff_t start)
3456{
3457	struct mm_struct *mm = vmf->vma->vm_mm;
3458
3459	/* Huge page is mapped? No need to proceed. */
3460	if (pmd_trans_huge(*vmf->pmd)) {
3461		folio_unlock(folio);
3462		folio_put(folio);
3463		return true;
3464	}
3465
3466	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3467		struct page *page = folio_file_page(folio, start);
3468		vm_fault_t ret = do_set_pmd(vmf, page);
3469		if (!ret) {
3470			/* The page is mapped successfully, reference consumed. */
3471			folio_unlock(folio);
3472			return true;
3473		}
3474	}
3475
3476	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
3477		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3478
3479	return false;
3480}
3481
3482static struct folio *next_uptodate_folio(struct xa_state *xas,
3483		struct address_space *mapping, pgoff_t end_pgoff)
3484{
3485	struct folio *folio = xas_next_entry(xas, end_pgoff);
3486	unsigned long max_idx;
3487
3488	do {
3489		if (!folio)
3490			return NULL;
3491		if (xas_retry(xas, folio))
3492			continue;
3493		if (xa_is_value(folio))
3494			continue;
3495		if (!folio_try_get(folio))
3496			continue;
3497		if (folio_test_locked(folio))
3498			goto skip;
3499		/* Has the page moved or been split? */
3500		if (unlikely(folio != xas_reload(xas)))
3501			goto skip;
3502		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3503			goto skip;
3504		if (!folio_trylock(folio))
3505			goto skip;
3506		if (folio->mapping != mapping)
3507			goto unlock;
3508		if (!folio_test_uptodate(folio))
3509			goto unlock;
3510		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3511		if (xas->xa_index >= max_idx)
3512			goto unlock;
3513		return folio;
3514unlock:
3515		folio_unlock(folio);
3516skip:
3517		folio_put(folio);
3518	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3519
3520	return NULL;
3521}
3522
3523/*
3524 * Map page range [start_page, start_page + nr_pages) of folio.
3525 * start_page is obtained from start via folio_page(folio, start).
3526 */
3527static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
3528			struct folio *folio, unsigned long start,
3529			unsigned long addr, unsigned int nr_pages,
3530			unsigned long *rss, unsigned int *mmap_miss)
3531{
3532	vm_fault_t ret = 0;
3533	struct page *page = folio_page(folio, start);
3534	unsigned int count = 0;
3535	pte_t *old_ptep = vmf->pte;
3536
3537	do {
3538		if (PageHWPoison(page + count))
3539			goto skip;
3540
3541		/*
3542		 * If there are too many folios that are recently evicted
3543		 * in a file, they will probably continue to be evicted.
3544		 * In such a situation, read-ahead is only a waste of IO.
3545		 * Don't decrease mmap_miss in this scenario to make sure
3546		 * we can stop read-ahead.
3547		 */
3548		if (!folio_test_workingset(folio))
3549			(*mmap_miss)++;
3550
3551		/*
3552		 * NOTE: If there are PTE markers, we'll leave them to be
3553		 * handled in the specific fault path, and it'll prohibit the
3554		 * fault-around logic.
3555		 */
3556		if (!pte_none(ptep_get(&vmf->pte[count])))
3557			goto skip;
3558
3559		count++;
3560		continue;
3561skip:
3562		if (count) {
3563			set_pte_range(vmf, folio, page, count, addr);
3564			*rss += count;
3565			folio_ref_add(folio, count);
3566			if (in_range(vmf->address, addr, count * PAGE_SIZE))
3567				ret = VM_FAULT_NOPAGE;
3568		}
3569
3570		count++;
3571		page += count;
3572		vmf->pte += count;
3573		addr += count * PAGE_SIZE;
3574		count = 0;
3575	} while (--nr_pages > 0);
3576
3577	if (count) {
3578		set_pte_range(vmf, folio, page, count, addr);
3579		*rss += count;
3580		folio_ref_add(folio, count);
3581		if (in_range(vmf->address, addr, count * PAGE_SIZE))
3582			ret = VM_FAULT_NOPAGE;
3583	}
3584
3585	vmf->pte = old_ptep;
3586
3587	return ret;
3588}
3589
3590static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
3591		struct folio *folio, unsigned long addr,
3592		unsigned long *rss, unsigned int *mmap_miss)
3593{
3594	vm_fault_t ret = 0;
3595	struct page *page = &folio->page;
3596
3597	if (PageHWPoison(page))
3598		return ret;
3599
3600	/* See comment of filemap_map_folio_range() */
3601	if (!folio_test_workingset(folio))
3602		(*mmap_miss)++;
3603
3604	/*
3605	 * NOTE: If there are PTE markers, we'll leave them to be
3606	 * handled in the specific fault path, and it'll prohibit
3607	 * the fault-around logic.
3608	 */
3609	if (!pte_none(ptep_get(vmf->pte)))
3610		return ret;
3611
3612	if (vmf->address == addr)
3613		ret = VM_FAULT_NOPAGE;
3614
3615	set_pte_range(vmf, folio, page, 1, addr);
3616	(*rss)++;
3617	folio_ref_inc(folio);
3618
3619	return ret;
3620}
3621
3622vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3623			     pgoff_t start_pgoff, pgoff_t end_pgoff)
3624{
3625	struct vm_area_struct *vma = vmf->vma;
3626	struct file *file = vma->vm_file;
3627	struct address_space *mapping = file->f_mapping;
3628	pgoff_t file_end, last_pgoff = start_pgoff;
3629	unsigned long addr;
3630	XA_STATE(xas, &mapping->i_pages, start_pgoff);
3631	struct folio *folio;
3632	vm_fault_t ret = 0;
3633	unsigned long rss = 0;
3634	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;
3635
3636	rcu_read_lock();
3637	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
3638	if (!folio)
3639		goto out;
3640
3641	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3642		ret = VM_FAULT_NOPAGE;
3643		goto out;
3644	}
3645
3646	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3647	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3648	if (!vmf->pte) {
3649		folio_unlock(folio);
3650		folio_put(folio);
3651		goto out;
3652	}
3653
3654	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
3655	if (end_pgoff > file_end)
3656		end_pgoff = file_end;
3657
3658	folio_type = mm_counter_file(folio);
3659	do {
3660		unsigned long end;
3661
3662		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3663		vmf->pte += xas.xa_index - last_pgoff;
3664		last_pgoff = xas.xa_index;
3665		end = folio_next_index(folio) - 1;
3666		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
3667
3668		if (!folio_test_large(folio))
3669			ret |= filemap_map_order0_folio(vmf,
3670					folio, addr, &rss, &mmap_miss);
3671		else
3672			ret |= filemap_map_folio_range(vmf, folio,
3673					xas.xa_index - folio->index, addr,
3674					nr_pages, &rss, &mmap_miss);
3675
3676		folio_unlock(folio);
3677		folio_put(folio);
3678	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
3679	add_mm_counter(vma->vm_mm, folio_type, rss);
3680	pte_unmap_unlock(vmf->pte, vmf->ptl);
3681	trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
3682out:
3683	rcu_read_unlock();
3684
3685	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
3686	if (mmap_miss >= mmap_miss_saved)
3687		WRITE_ONCE(file->f_ra.mmap_miss, 0);
3688	else
3689		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
3690
3691	return ret;
3692}
3693EXPORT_SYMBOL(filemap_map_pages);
3694
3695vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3696{
3697	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3698	struct folio *folio = page_folio(vmf->page);
3699	vm_fault_t ret = VM_FAULT_LOCKED;
3700
3701	sb_start_pagefault(mapping->host->i_sb);
3702	file_update_time(vmf->vma->vm_file);
3703	folio_lock(folio);
3704	if (folio->mapping != mapping) {
3705		folio_unlock(folio);
3706		ret = VM_FAULT_NOPAGE;
3707		goto out;
3708	}
3709	/*
3710	 * We mark the folio dirty already here so that when freeze is in
3711	 * progress, we are guaranteed that writeback during freezing will
3712	 * see the dirty folio and writeprotect it again.
3713	 */
3714	folio_mark_dirty(folio);
3715	folio_wait_stable(folio);
3716out:
3717	sb_end_pagefault(mapping->host->i_sb);
3718	return ret;
3719}
3720
3721const struct vm_operations_struct generic_file_vm_ops = {
3722	.fault		= filemap_fault,
3723	.map_pages	= filemap_map_pages,
3724	.page_mkwrite	= filemap_page_mkwrite,
3725};
3726
3727/* This is used for a general mmap of a disk file */
3728
3729int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3730{
3731	struct address_space *mapping = file->f_mapping;
3732
3733	if (!mapping->a_ops->read_folio)
3734		return -ENOEXEC;
3735	file_accessed(file);
3736	vma->vm_ops = &generic_file_vm_ops;
3737	return 0;
3738}
3739
3740/*
3741 * This is for filesystems which do not implement ->writepage.
3742 */
3743int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3744{
3745	if (vma_is_shared_maywrite(vma))
3746		return -EINVAL;
3747	return generic_file_mmap(file, vma);
3748}
3749#else
3750vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3751{
3752	return VM_FAULT_SIGBUS;
3753}
3754int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3755{
3756	return -ENOSYS;
3757}
3758int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3759{
3760	return -ENOSYS;
3761}
3762#endif /* CONFIG_MMU */
3763
3764EXPORT_SYMBOL(filemap_page_mkwrite);
3765EXPORT_SYMBOL(generic_file_mmap);
3766EXPORT_SYMBOL(generic_file_readonly_mmap);
3767
3768static struct folio *do_read_cache_folio(struct address_space *mapping,
3769		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3770{
3771	struct folio *folio;
3772	int err;
3773
3774	if (!filler)
3775		filler = mapping->a_ops->read_folio;
3776repeat:
3777	folio = filemap_get_folio(mapping, index);
3778	if (IS_ERR(folio)) {
3779		folio = filemap_alloc_folio(gfp,
3780					    mapping_min_folio_order(mapping));
3781		if (!folio)
3782			return ERR_PTR(-ENOMEM);
3783		index = mapping_align_index(mapping, index);
3784		err = filemap_add_folio(mapping, folio, index, gfp);
3785		if (unlikely(err)) {
3786			folio_put(folio);
3787			if (err == -EEXIST)
3788				goto repeat;
3789			/* Presumably ENOMEM for xarray node */
3790			return ERR_PTR(err);
3791		}
3792
3793		goto filler;
3794	}
3795	if (folio_test_uptodate(folio))
3796		goto out;
3797
3798	if (!folio_trylock(folio)) {
3799		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3800		goto repeat;
3801	}
3802
3803	/* Folio was truncated from mapping */
3804	if (!folio->mapping) {
3805		folio_unlock(folio);
3806		folio_put(folio);
3807		goto repeat;
3808	}
3809
3810	/* Someone else locked and filled the page in a very small window */
3811	if (folio_test_uptodate(folio)) {
3812		folio_unlock(folio);
3813		goto out;
3814	}
3815
3816filler:
3817	err = filemap_read_folio(file, filler, folio);
3818	if (err) {
3819		folio_put(folio);
3820		if (err == AOP_TRUNCATED_PAGE)
3821			goto repeat;
3822		return ERR_PTR(err);
3823	}
3824
3825out:
3826	folio_mark_accessed(folio);
3827	return folio;
3828}
3829
3830/**
3831 * read_cache_folio - Read into page cache, fill it if needed.
3832 * @mapping: The address_space to read from.
3833 * @index: The index to read.
3834 * @filler: Function to perform the read, or NULL to use aops->read_folio().
3835 * @file: Passed to filler function, may be NULL if not required.
3836 *
3837 * Read one page into the page cache.  If it succeeds, the folio returned
3838 * will contain @index, but it may not be the first page of the folio.
3839 *
3840 * If the filler function returns an error, it will be returned to the
3841 * caller.
3842 *
3843 * Context: May sleep.  Expects mapping->invalidate_lock to be held.
3844 * Return: An uptodate folio on success, ERR_PTR() on failure.
3845 */
3846struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3847		filler_t filler, struct file *file)
3848{
3849	return do_read_cache_folio(mapping, index, filler, file,
3850			mapping_gfp_mask(mapping));
3851}
3852EXPORT_SYMBOL(read_cache_folio);
3853
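/*
 * Illustrative sketch, not part of this file: a typical read_cache_folio()
 * caller reads one folio's worth of file data through the page cache and
 * maps it for inspection.  "examplefs_read_index" is hypothetical, and the
 * locking context (mapping->invalidate_lock) follows the comment above.
 */
#if 0
static int examplefs_read_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;
	void *kaddr;

	folio = read_cache_folio(mapping, index, NULL, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio contains @index but may not start at it. */
	kaddr = kmap_local_folio(folio,
			offset_in_folio(folio, (loff_t)index << PAGE_SHIFT));
	/* ... examine the now-uptodate data at kaddr ... */
	kunmap_local(kaddr);

	folio_put(folio);
	return 0;
}
#endif
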
3854/**
3855 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3856 * @mapping:	The address_space for the folio.
3857 * @index:	The index that the allocated folio will contain.
3858 * @gfp:	The page allocator flags to use if allocating.
3859 *
3860 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
3861 * any new memory allocations done using the specified allocation flags.
3862 *
3863 * The most likely error from this function is EIO, but ENOMEM is
3864 * possible and so is EINTR.  If ->read_folio returns another error,
3865 * that will be returned to the caller.
3866 *
3867 * The function expects mapping->invalidate_lock to be already held.
3868 *
3869 * Return: Uptodate folio on success, ERR_PTR() on failure.
3870 */
3871struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3872		pgoff_t index, gfp_t gfp)
3873{
3874	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
3875}
3876EXPORT_SYMBOL(mapping_read_folio_gfp);
3877
3878static struct page *do_read_cache_page(struct address_space *mapping,
3879		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3880{
3881	struct folio *folio;
3882
3883	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3884	if (IS_ERR(folio))
3885		return &folio->page;
3886	return folio_file_page(folio, index);
3887}
3888
3889struct page *read_cache_page(struct address_space *mapping,
3890			pgoff_t index, filler_t *filler, struct file *file)
3891{
3892	return do_read_cache_page(mapping, index, filler, file,
3893			mapping_gfp_mask(mapping));
3894}
3895EXPORT_SYMBOL(read_cache_page);
3896
3897/**
3898 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3899 * @mapping:	the page's address_space
3900 * @index:	the page index
3901 * @gfp:	the page allocator flags to use if allocating
3902 *
3903 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3904 * any new page allocations done using the specified allocation flags.
3905 *
3906 * If the page does not get brought uptodate, return -EIO.
3907 *
3908 * The function expects mapping->invalidate_lock to be already held.
3909 *
3910 * Return: up-to-date page on success, ERR_PTR() on failure.
3911 */
3912struct page *read_cache_page_gfp(struct address_space *mapping,
3913				pgoff_t index,
3914				gfp_t gfp)
3915{
3916	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3917}
3918EXPORT_SYMBOL(read_cache_page_gfp);
3919
3920/*
3921 * Warn about a page cache invalidation failure during a direct I/O write.
3922 */
3923static void dio_warn_stale_pagecache(struct file *filp)
3924{
3925	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3926	char pathname[128];
3927	char *path;
3928
3929	errseq_set(&filp->f_mapping->wb_err, -EIO);
3930	if (__ratelimit(&_rs)) {
3931		path = file_path(filp, pathname, sizeof(pathname));
3932		if (IS_ERR(path))
3933			path = "(unknown)";
3934		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3935		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3936			current->comm);
3937	}
3938}
3939
3940void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
3941{
3942	struct address_space *mapping = iocb->ki_filp->f_mapping;
3943
3944	if (mapping->nrpages &&
3945	    invalidate_inode_pages2_range(mapping,
3946			iocb->ki_pos >> PAGE_SHIFT,
3947			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
3948		dio_warn_stale_pagecache(iocb->ki_filp);
3949}
3950
3951ssize_t
3952generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3953{
3954	struct address_space *mapping = iocb->ki_filp->f_mapping;
3955	size_t write_len = iov_iter_count(from);
3956	ssize_t written;
3957
3958	/*
3959	 * If a page cannot be invalidated, return 0 to fall back
3960	 * to buffered write.
3961	 */
3962	written = kiocb_invalidate_pages(iocb, write_len);
3963	if (written) {
3964		if (written == -EBUSY)
3965			return 0;
3966		return written;
3967	}
3968
3969	written = mapping->a_ops->direct_IO(iocb, from);
3970
3971	/*
3972	 * Finally, try again to invalidate clean pages which might have been
3973	 * cached by non-direct readahead, or faulted in by get_user_pages()
3974	 * if the source of the write was an mmap'ed region of the file
3975	 * we're writing.  Either one is a pretty crazy thing to do,
3976	 * so we don't support it 100%.  If this invalidation
3977	 * fails, tough, the write still worked...
3978	 *
3979	 * Most of the time we do not need this since dio_complete() will do
3980	 * the invalidation for us. However there are some file systems that
3981	 * do not end up with dio_complete() being called, so let's not break
3982	 * them by removing it completely.
3983	 *
3984	 * Noticeable example is a blkdev_direct_IO().
3985	 *
3986	 * Skip invalidation for async writes or if mapping has no pages.
3987	 */
3988	if (written > 0) {
3989		struct inode *inode = mapping->host;
3990		loff_t pos = iocb->ki_pos;
3991
3992		kiocb_invalidate_post_direct_write(iocb, written);
3993		pos += written;
3994		write_len -= written;
3995		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3996			i_size_write(inode, pos);
3997			mark_inode_dirty(inode);
3998		}
3999		iocb->ki_pos = pos;
4000	}
4001	if (written != -EIOCBQUEUED)
4002		iov_iter_revert(from, write_len - iov_iter_count(from));
4003	return written;
4004}
4005EXPORT_SYMBOL(generic_file_direct_write);
4006
4007ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
4008{
4009	struct file *file = iocb->ki_filp;
4010	loff_t pos = iocb->ki_pos;
4011	struct address_space *mapping = file->f_mapping;
4012	const struct address_space_operations *a_ops = mapping->a_ops;
4013	size_t chunk = mapping_max_folio_size(mapping);
4014	long status = 0;
4015	ssize_t written = 0;
4016
4017	do {
4018		struct folio *folio;
4019		size_t offset;		/* Offset into folio */
4020		size_t bytes;		/* Bytes to write to folio */
4021		size_t copied;		/* Bytes copied from user */
4022		void *fsdata = NULL;
4023
4024		bytes = iov_iter_count(i);
4025retry:
4026		offset = pos & (chunk - 1);
4027		bytes = min(chunk - offset, bytes);
4028		balance_dirty_pages_ratelimited(mapping);
4029
4030		/*
4031		 * Bring in the user page that we will copy from _first_.
4032		 * Otherwise there's a nasty deadlock on copying from the
4033		 * same page as we're writing to, without it being marked
4034		 * up-to-date.
4035		 */
4036		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
4037			status = -EFAULT;
4038			break;
4039		}
4040
4041		if (fatal_signal_pending(current)) {
4042			status = -EINTR;
4043			break;
4044		}
4045
4046		status = a_ops->write_begin(file, mapping, pos, bytes,
4047						&folio, &fsdata);
4048		if (unlikely(status < 0))
4049			break;
4050
4051		offset = offset_in_folio(folio, pos);
4052		if (bytes > folio_size(folio) - offset)
4053			bytes = folio_size(folio) - offset;
4054
4055		if (mapping_writably_mapped(mapping))
4056			flush_dcache_folio(folio);
4057
4058		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
4059		flush_dcache_folio(folio);
4060
4061		status = a_ops->write_end(file, mapping, pos, bytes, copied,
4062						folio, fsdata);
4063		if (unlikely(status != copied)) {
4064			iov_iter_revert(i, copied - max(status, 0L));
4065			if (unlikely(status < 0))
4066				break;
4067		}
4068		cond_resched();
4069
4070		if (unlikely(status == 0)) {
4071			/*
4072			 * A short copy made ->write_end() reject the
4073			 * thing entirely.  Might be memory poisoning
4074			 * halfway through, might be a race with munmap,
4075			 * might be severe memory pressure.
4076			 */
4077			if (chunk > PAGE_SIZE)
4078				chunk /= 2;
4079			if (copied) {
4080				bytes = copied;
4081				goto retry;
4082			}
4083		} else {
4084			pos += status;
4085			written += status;
4086		}
4087	} while (iov_iter_count(i));
4088
4089	if (!written)
4090		return status;
4091	iocb->ki_pos += written;
4092	return written;
4093}
4094EXPORT_SYMBOL(generic_perform_write);
4095
4096/**
4097 * __generic_file_write_iter - write data to a file
4098 * @iocb:	IO state structure (file, offset, etc.)
4099 * @from:	iov_iter with data to write
4100 *
4101 * This function does all the work needed for actually writing data to a
4102 * file. It does all basic checks, removes SUID from the file, updates
4103 * modification times and calls proper subroutines depending on whether we
4104 * do direct IO or a standard buffered write.
4105 *
4106 * It expects i_rwsem to be grabbed unless we work on a block device or similar
4107 * object which does not need locking at all.
4108 *
4109 * This function does *not* take care of syncing data in case of O_SYNC write.
4110 * A caller has to handle it. This is mainly due to the fact that we want to
4111 * avoid syncing under i_rwsem.
4112 *
4113 * Return:
4114 * * number of bytes written, even for truncated writes
4115 * * negative error code if no data has been written at all
4116 */
4117ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4118{
4119	struct file *file = iocb->ki_filp;
4120	struct address_space *mapping = file->f_mapping;
4121	struct inode *inode = mapping->host;
4122	ssize_t ret;
4123
4124	ret = file_remove_privs(file);
4125	if (ret)
4126		return ret;
4127
4128	ret = file_update_time(file);
4129	if (ret)
4130		return ret;
4131
4132	if (iocb->ki_flags & IOCB_DIRECT) {
4133		ret = generic_file_direct_write(iocb, from);
4134		/*
4135		 * If the write stopped short of completing, fall back to
4136		 * buffered writes.  Some filesystems do this for writes to
4137		 * holes, for example.  For DAX files, a buffered write will
4138		 * not succeed (even if it did, DAX does not handle dirty
4139		 * page-cache pages correctly).
4140		 */
4141		if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
4142			return ret;
4143		return direct_write_fallback(iocb, from, ret,
4144				generic_perform_write(iocb, from));
4145	}
4146
4147	return generic_perform_write(iocb, from);
4148}
4149EXPORT_SYMBOL(__generic_file_write_iter);
4150
4151/**
4152 * generic_file_write_iter - write data to a file
4153 * @iocb:	IO state structure
4154 * @from:	iov_iter with data to write
4155 *
4156 * This is a wrapper around __generic_file_write_iter() to be used by most
4157 * filesystems. It takes care of syncing the file in case of O_SYNC file
4158 * and acquires i_rwsem as needed.
4159 * Return:
4160 * * negative error code if no data has been written at all or
4161 *   vfs_fsync_range() failed for a synchronous write
4162 * * number of bytes written, even for truncated writes
4163 */
4164ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4165{
4166	struct file *file = iocb->ki_filp;
4167	struct inode *inode = file->f_mapping->host;
4168	ssize_t ret;
4169
4170	inode_lock(inode);
4171	ret = generic_write_checks(iocb, from);
4172	if (ret > 0)
4173		ret = __generic_file_write_iter(iocb, from);
4174	inode_unlock(inode);
4175
4176	if (ret > 0)
4177		ret = generic_write_sync(iocb, ret);
4178	return ret;
4179}
4180EXPORT_SYMBOL(generic_file_write_iter);
4181
4182/**
4183 * filemap_release_folio() - Release fs-specific metadata on a folio.
4184 * @folio: The folio which the kernel is trying to free.
4185 * @gfp: Memory allocation flags (and I/O mode).
4186 *
4187 * The address_space is trying to release any data attached to a folio
4188 * (presumably at folio->private).
4189 *
4190 * This will also be called if the private_2 flag is set on a page,
4191 * indicating that the folio has other metadata associated with it.
4192 *
4193 * The @gfp argument specifies whether I/O may be performed to release
4194 * this page (__GFP_IO), and whether the call may block
4195 * (__GFP_RECLAIM & __GFP_FS).
4196 *
4197 * Return: %true if the release was successful, otherwise %false.
4198 */
4199bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4200{
4201	struct address_space * const mapping = folio->mapping;
4202
4203	BUG_ON(!folio_test_locked(folio));
4204	if (!folio_needs_release(folio))
4205		return true;
4206	if (folio_test_writeback(folio))
4207		return false;
4208
4209	if (mapping && mapping->a_ops->release_folio)
4210		return mapping->a_ops->release_folio(folio, gfp);
4211	return try_to_free_buffers(folio);
4212}
4213EXPORT_SYMBOL(filemap_release_folio);
4214
4215/**
4216 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
4217 * @inode: The inode to flush
4218 * @flush: Set to write back rather than simply invalidate.
4219 * @start: First byte in the range.
4220 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
4221 *       onwards.
4222 *
4223 * Invalidate all the folios on an inode that contribute to the specified
4224 * range, possibly writing them back first.  Whilst the operation is
4225 * undertaken, the invalidate lock is held to prevent new folios from being
4226 * installed.
4227 */
4228int filemap_invalidate_inode(struct inode *inode, bool flush,
4229			     loff_t start, loff_t end)
4230{
4231	struct address_space *mapping = inode->i_mapping;
4232	pgoff_t first = start >> PAGE_SHIFT;
4233	pgoff_t last = end >> PAGE_SHIFT;
4234	pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;
4235
4236	if (!mapping || !mapping->nrpages || end < start)
4237		goto out;
4238
4239	/* Prevent new folios from being added to the inode. */
4240	filemap_invalidate_lock(mapping);
4241
4242	if (!mapping->nrpages)
4243		goto unlock;
4244
4245	unmap_mapping_pages(mapping, first, nr, false);
4246
4247	/* Write back the data if we're asked to. */
4248	if (flush) {
4249		struct writeback_control wbc = {
4250			.sync_mode	= WB_SYNC_ALL,
4251			.nr_to_write	= LONG_MAX,
4252			.range_start	= start,
4253			.range_end	= end,
4254		};
4255
4256		filemap_fdatawrite_wbc(mapping, &wbc);
4257	}
4258
4259	/* Wait for writeback to complete on all folios and discard. */
4260	invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
4261
4262unlock:
4263	filemap_invalidate_unlock(mapping);
4264out:
4265	return filemap_check_errors(mapping);
4266}
4267EXPORT_SYMBOL_GPL(filemap_invalidate_inode);
4268
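/*
 * Illustrative sketch, not part of this file: a caller (for example a
 * network filesystem told that a byte range changed on the server) can
 * flush and drop its cached folios for everything from an offset onwards
 * by passing LLONG_MAX as the end, as described above.
 * "examplefs_invalidate_from" is a hypothetical helper.
 */
#if 0
static int examplefs_invalidate_from(struct inode *inode, loff_t start)
{
	/* Write dirty folios back first (flush == true), then invalidate. */
	return filemap_invalidate_inode(inode, true, start, LLONG_MAX);
}
#endif
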
4269#ifdef CONFIG_CACHESTAT_SYSCALL
4270/**
4271 * filemap_cachestat() - compute the page cache statistics of a mapping
4272 * @mapping:	The mapping to compute the statistics for.
4273 * @first_index:	The starting page cache index.
4274 * @last_index:	The final page index (inclusive).
4275 * @cs:	the cachestat struct to write the result to.
4276 *
4277 * This will query the page cache statistics of a mapping in the
4278 * page range of [first_index, last_index] (inclusive). The statistics
4279 * queried include: number of dirty pages, number of pages marked for
4280 * writeback, and the number of (recently) evicted pages.
4281 */
4282static void filemap_cachestat(struct address_space *mapping,
4283		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
4284{
4285	XA_STATE(xas, &mapping->i_pages, first_index);
4286	struct folio *folio;
4287
4288	/* Flush stats (and potentially sleep) outside the RCU read section. */
4289	mem_cgroup_flush_stats_ratelimited(NULL);
4290
4291	rcu_read_lock();
4292	xas_for_each(&xas, folio, last_index) {
4293		int order;
4294		unsigned long nr_pages;
4295		pgoff_t folio_first_index, folio_last_index;
4296
4297		/*
4298		 * Don't deref the folio. It is not pinned, and might
4299		 * get freed (and reused) underneath us.
4300		 *
4301		 * We *could* pin it, but that would be expensive for
4302		 * what should be a fast and lightweight syscall.
4303		 *
4304		 * Instead, derive all information of interest from
4305		 * the rcu-protected xarray.
4306		 */
4307
4308		if (xas_retry(&xas, folio))
4309			continue;
4310
4311		order = xas_get_order(&xas);
4312		nr_pages = 1 << order;
4313		folio_first_index = round_down(xas.xa_index, 1 << order);
4314		folio_last_index = folio_first_index + nr_pages - 1;
4315
4316		/* Folios might straddle the range boundaries, only count covered pages */
4317		if (folio_first_index < first_index)
4318			nr_pages -= first_index - folio_first_index;
4319
4320		if (folio_last_index > last_index)
4321			nr_pages -= folio_last_index - last_index;
4322
4323		if (xa_is_value(folio)) {
4324			/* page is evicted */
4325			void *shadow = (void *)folio;
4326			bool workingset; /* not used */
4327
4328			cs->nr_evicted += nr_pages;
4329
4330#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
4331			if (shmem_mapping(mapping)) {
4332				/* shmem file - in swap cache */
4333				swp_entry_t swp = radix_to_swp_entry(folio);
4334
4335				/* swapin error results in poisoned entry */
4336				if (non_swap_entry(swp))
4337					goto resched;
4338
4339				/*
4340				 * Getting a swap entry from the shmem
4341				 * inode means we beat
4342				 * shmem_unuse(). rcu_read_lock()
4343				 * ensures swapoff waits for us before
4344				 * freeing the swapper space. However,
4345				 * we can race with swapping and
4346				 * invalidation, so there might not be
4347				 * a shadow in the swapcache (yet).
4348				 */
4349				shadow = get_shadow_from_swap_cache(swp);
4350				if (!shadow)
4351					goto resched;
4352			}
4353#endif
4354			if (workingset_test_recent(shadow, true, &workingset, false))
4355				cs->nr_recently_evicted += nr_pages;
4356
4357			goto resched;
4358		}
4359
4360		/* page is in cache */
4361		cs->nr_cache += nr_pages;
4362
4363		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
4364			cs->nr_dirty += nr_pages;
4365
4366		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
4367			cs->nr_writeback += nr_pages;
4368
4369resched:
4370		if (need_resched()) {
4371			xas_pause(&xas);
4372			cond_resched_rcu();
4373		}
4374	}
4375	rcu_read_unlock();
4376}
4377
4378/*
4379 * See mincore: reveal pagecache information only for files
4380 * that the calling process has write access to, or could (if
4381 * tried) open for writing.
4382 */
4383static inline bool can_do_cachestat(struct file *f)
4384{
4385	if (f->f_mode & FMODE_WRITE)
4386		return true;
4387	if (inode_owner_or_capable(file_mnt_idmap(f), file_inode(f)))
4388		return true;
4389	return file_permission(f, MAY_WRITE) == 0;
4390}
4391
4392/*
4393 * The cachestat(2) system call.
4394 *
4395 * cachestat() returns the page cache statistics of a file in the
4396 * bytes range specified by `off` and `len`: number of cached pages,
4397 * number of dirty pages, number of pages marked for writeback,
4398 * number of evicted pages, and number of recently evicted pages.
4399 *
4400 * An evicted page is a page that was previously in the page cache
4401 * but has been evicted since. A page is recently evicted if its last
4402 * eviction was recent enough that its reentry to the cache would
4403 * indicate that it is actively being used by the system, and that
4404 * there is memory pressure on the system.
4405 *
4406 * `off` and `len` must be non-negative integers. If `len` > 0,
4407 * the queried range is [`off`, `off` + `len`]. If `len` == 0,
4408 * we will query in the range from `off` to the end of the file.
4409 *
4410 * The `flags` argument is unused for now, but is included for future
4411 * extensibility. Users should pass 0 (i.e. no flags specified).
4412 *
4413 * Currently, hugetlbfs is not supported.
4414 *
4415 * Because the status of a page can change after cachestat() checks it
4416 * but before it returns to the application, the returned values may
4417 * contain stale information.
4418 *
4419 * return values:
4420 *  zero        - success
4421 *  -EFAULT     - cstat or cstat_range points to an illegal address
4422 *  -EINVAL     - invalid flags
4423 *  -EBADF      - invalid file descriptor
4424 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
4425 */
4426SYSCALL_DEFINE4(cachestat, unsigned int, fd,
4427		struct cachestat_range __user *, cstat_range,
4428		struct cachestat __user *, cstat, unsigned int, flags)
4429{
4430	CLASS(fd, f)(fd);
4431	struct address_space *mapping;
4432	struct cachestat_range csr;
4433	struct cachestat cs;
4434	pgoff_t first_index, last_index;
4435
4436	if (fd_empty(f))
4437		return -EBADF;
4438
4439	if (copy_from_user(&csr, cstat_range,
4440			sizeof(struct cachestat_range)))
4441		return -EFAULT;
4442
4443	/* hugetlbfs is not supported */
4444	if (is_file_hugepages(fd_file(f)))
4445		return -EOPNOTSUPP;
4446
4447	if (!can_do_cachestat(fd_file(f)))
4448		return -EPERM;
4449
4450	if (flags != 0)
4451		return -EINVAL;
4452
4453	first_index = csr.off >> PAGE_SHIFT;
4454	last_index =
4455		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
4456	memset(&cs, 0, sizeof(struct cachestat));
4457	mapping = fd_file(f)->f_mapping;
4458	filemap_cachestat(mapping, first_index, last_index, &cs);
4459
4460	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
4461		return -EFAULT;
4462
4463	return 0;
4464}
4465#endif /* CONFIG_CACHESTAT_SYSCALL */
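
/*
 * Illustrative userspace sketch, not part of this file: querying the page
 * cache statistics of an open file with cachestat(2).  It assumes a libc
 * and uapi headers new enough to provide __NR_cachestat and the
 * struct cachestat definitions from <linux/mman.h>.
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>

static int print_cachestat(int fd)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* whole file */
	struct cachestat cs;

	if (syscall(__NR_cachestat, fd, &range, &cs, 0))
		return -1;

	printf("cached %llu dirty %llu writeback %llu evicted %llu recent %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	return 0;
}
#endif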