   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/mm/filemap.c
   4 *
   5 * Copyright (C) 1994-1999  Linus Torvalds
   6 */
   7
   8/*
   9 * This file handles the generic file mmap semantics used by
  10 * most "normal" filesystems (but you don't /have/ to use this:
  11 * the NFS filesystem used to do this differently, for example)
  12 */
  13#include <linux/export.h>
  14#include <linux/compiler.h>
  15#include <linux/dax.h>
  16#include <linux/fs.h>
  17#include <linux/sched/signal.h>
  18#include <linux/uaccess.h>
  19#include <linux/capability.h>
  20#include <linux/kernel_stat.h>
  21#include <linux/gfp.h>
  22#include <linux/mm.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/syscalls.h>
  26#include <linux/mman.h>
  27#include <linux/pagemap.h>
  28#include <linux/file.h>
  29#include <linux/uio.h>
  30#include <linux/error-injection.h>
  31#include <linux/hash.h>
  32#include <linux/writeback.h>
  33#include <linux/backing-dev.h>
  34#include <linux/pagevec.h>
  35#include <linux/security.h>
  36#include <linux/cpuset.h>
  37#include <linux/hugetlb.h>
  38#include <linux/memcontrol.h>
  39#include <linux/shmem_fs.h>
  40#include <linux/rmap.h>
  41#include <linux/delayacct.h>
  42#include <linux/psi.h>
  43#include <linux/ramfs.h>
  44#include <linux/page_idle.h>
  45#include <linux/migrate.h>
  46#include <linux/pipe_fs_i.h>
  47#include <linux/splice.h>
  48#include <linux/rcupdate_wait.h>
  49#include <asm/pgalloc.h>
  50#include <asm/tlbflush.h>
  51#include "internal.h"
  52
  53#define CREATE_TRACE_POINTS
  54#include <trace/events/filemap.h>
  55
  56/*
  57 * FIXME: remove all knowledge of the buffer layer from the core VM
  58 */
  59#include <linux/buffer_head.h> /* for try_to_free_buffers */
  60
  61#include <asm/mman.h>
  62
  63#include "swap.h"
  64
  65/*
  66 * Shared mappings implemented 30.11.1994. It's not fully working yet,
  67 * though.
  68 *
  69 * Shared mappings now work. 15.8.1995  Bruno.
  70 *
  71 * finished 'unifying' the page and buffer cache and SMP-threaded the
  72 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  73 *
  74 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  75 */
  76
  77/*
  78 * Lock ordering:
  79 *
  80 *  ->i_mmap_rwsem		(truncate_pagecache)
  81 *    ->private_lock		(__free_pte->block_dirty_folio)
  82 *      ->swap_lock		(exclusive_swap_page, others)
  83 *        ->i_pages lock
  84 *
  85 *  ->i_rwsem
  86 *    ->invalidate_lock		(acquired by fs in truncate path)
  87 *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
  88 *
  89 *  ->mmap_lock
  90 *    ->i_mmap_rwsem
  91 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  92 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
  93 *
  94 *  ->mmap_lock
  95 *    ->invalidate_lock		(filemap_fault)
  96 *      ->lock_page		(filemap_fault, access_process_vm)
  97 *
  98 *  ->i_rwsem			(generic_perform_write)
  99 *    ->mmap_lock		(fault_in_readable->do_page_fault)
 100 *
 101 *  bdi->wb.list_lock
 102 *    sb_lock			(fs/fs-writeback.c)
 103 *    ->i_pages lock		(__sync_single_inode)
 104 *
 105 *  ->i_mmap_rwsem
 106 *    ->anon_vma.lock		(vma_merge)
 107 *
 108 *  ->anon_vma.lock
 109 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 110 *
 111 *  ->page_table_lock or pte_lock
 112 *    ->swap_lock		(try_to_unmap_one)
 113 *    ->private_lock		(try_to_unmap_one)
 114 *    ->i_pages lock		(try_to_unmap_one)
 115 *    ->lruvec->lru_lock	(follow_page->mark_page_accessed)
 116 *    ->lruvec->lru_lock	(check_pte_range->isolate_lru_page)
 117 *    ->private_lock		(folio_remove_rmap_pte->set_page_dirty)
 118 *    ->i_pages lock		(folio_remove_rmap_pte->set_page_dirty)
 119 *    bdi.wb->list_lock		(folio_remove_rmap_pte->set_page_dirty)
 120 *    ->inode->i_lock		(folio_remove_rmap_pte->set_page_dirty)
 121 *    ->memcg->move_lock	(folio_remove_rmap_pte->folio_memcg_lock)
 122 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 123 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 124 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 125 */
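/*
 * Editor's sketch, not part of the original file: a minimal illustration
 * of the fault-path ordering documented above (->mmap_lock, then
 * ->invalidate_lock, then ->lock_page), mirroring what filemap_fault()
 * does.  The example_* name is hypothetical.
 */
static void __maybe_unused example_fault_path_ordering(struct mm_struct *mm,
		struct address_space *mapping, struct folio *folio)
{
	mmap_read_lock(mm);				/* ->mmap_lock */
	filemap_invalidate_lock_shared(mapping);	/* ->invalidate_lock */
	folio_lock(folio);				/* ->lock_page */

	/* ... fault handling work would go here ... */

	folio_unlock(folio);
	filemap_invalidate_unlock_shared(mapping);
	mmap_read_unlock(mm);
}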
 126
 127static void page_cache_delete(struct address_space *mapping,
 128				   struct folio *folio, void *shadow)
 129{
 130	XA_STATE(xas, &mapping->i_pages, folio->index);
 131	long nr = 1;
 132
 133	mapping_set_update(&xas, mapping);
 134
 135	xas_set_order(&xas, folio->index, folio_order(folio));
 136	nr = folio_nr_pages(folio);
 137
 138	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 139
 140	xas_store(&xas, shadow);
 141	xas_init_marks(&xas);
 142
 143	folio->mapping = NULL;
 144	/* Leave page->index set: truncation lookup relies upon it */
 145	mapping->nrpages -= nr;
 146}
 147
 148static void filemap_unaccount_folio(struct address_space *mapping,
 149		struct folio *folio)
 150{
 151	long nr;
 152
 153	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
 154	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
 155		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
 156			 current->comm, folio_pfn(folio));
 157		dump_page(&folio->page, "still mapped when deleted");
 158		dump_stack();
 159		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 160
 161		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
 162			int mapcount = page_mapcount(&folio->page);
 163
 164			if (folio_ref_count(folio) >= mapcount + 2) {
 165				/*
 166				 * All vmas have already been torn down, so it's
 167				 * a good bet that actually the page is unmapped
 168				 * and we'd rather not leak it: if we're wrong,
 169				 * another bad page check should catch it later.
 170				 */
 171				page_mapcount_reset(&folio->page);
 172				folio_ref_sub(folio, mapcount);
 173			}
 174		}
 175	}
 176
 177	/* hugetlb folios do not participate in page cache accounting. */
 178	if (folio_test_hugetlb(folio))
 179		return;
 180
 181	nr = folio_nr_pages(folio);
 182
 183	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 184	if (folio_test_swapbacked(folio)) {
 185		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
 186		if (folio_test_pmd_mappable(folio))
 187			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
 188	} else if (folio_test_pmd_mappable(folio)) {
 189		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
 190		filemap_nr_thps_dec(mapping);
 191	}
 192
 193	/*
 194	 * At this point the folio must have been either written back or
 195	 * cleaned by truncate.  A dirty folio here signals a bug and, on
 196	 * ordinary filesystems, loss of unwritten data.
 197	 *
 198	 * But it's harmless on in-memory filesystems like tmpfs; and can
 199	 * occur when a driver which did get_user_pages() sets page dirty
 200	 * before putting it, while the inode is being finally evicted.
 201	 *
 202	 * The check below fixes dirty accounting after removing the folio
 203	 * entirely, but leaves the dirty flag set: that has no effect on a
 204	 * truncated folio, and the flag will anyway be cleared before the
 205	 * folio is returned to the buddy allocator.
 206	 */
 207	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
 208			 mapping_can_writeback(mapping)))
 209		folio_account_cleaned(folio, inode_to_wb(mapping->host));
 210}
 211
 212/*
 213 * Delete a page from the page cache and free it. Caller has to make
 214 * sure the page is locked and that nobody else uses it - or that usage
 215 * is safe.  The caller must hold the i_pages lock.
 216 */
 217void __filemap_remove_folio(struct folio *folio, void *shadow)
 218{
 219	struct address_space *mapping = folio->mapping;
 220
 221	trace_mm_filemap_delete_from_page_cache(folio);
 222	filemap_unaccount_folio(mapping, folio);
 223	page_cache_delete(mapping, folio, shadow);
 224}
 225
 226void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 227{
 228	void (*free_folio)(struct folio *);
 229	int refs = 1;
 230
 231	free_folio = mapping->a_ops->free_folio;
 232	if (free_folio)
 233		free_folio(folio);
 234
 235	if (folio_test_large(folio))
 236		refs = folio_nr_pages(folio);
 237	folio_put_refs(folio, refs);
 238}
 239
 240/**
 241 * filemap_remove_folio - Remove folio from page cache.
 242 * @folio: The folio.
 243 *
 244 * This must be called only on folios that are locked and have been
 245 * verified to be in the page cache.  It will never put the folio into
 246 * the free list because the caller has a reference on the page.
 247 */
 248void filemap_remove_folio(struct folio *folio)
 249{
 250	struct address_space *mapping = folio->mapping;
 251
 252	BUG_ON(!folio_test_locked(folio));
 253	spin_lock(&mapping->host->i_lock);
 254	xa_lock_irq(&mapping->i_pages);
 255	__filemap_remove_folio(folio, NULL);
 256	xa_unlock_irq(&mapping->i_pages);
 257	if (mapping_shrinkable(mapping))
 258		inode_add_lru(mapping->host);
 259	spin_unlock(&mapping->host->i_lock);
 260
 261	filemap_free_folio(mapping, folio);
 262}
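/*
 * Editor's sketch, not part of the original file: typical caller-side
 * use of filemap_remove_folio() — look the folio up locked, remove it,
 * then drop our own lock and reference.  The example_* name is
 * hypothetical.
 */
static void __maybe_unused example_remove_at(struct address_space *mapping,
					     pgoff_t index)
{
	struct folio *folio = filemap_lock_folio(mapping, index);

	if (IS_ERR(folio))
		return;
	filemap_remove_folio(folio);	/* drops the pagecache reference */
	folio_unlock(folio);
	folio_put(folio);		/* drops the lookup reference */
}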
 263
 264/*
 265 * page_cache_delete_batch - delete several folios from page cache
 266 * @mapping: the mapping to which folios belong
 267 * @fbatch: batch of folios to delete
 268 *
 269 * The function walks over mapping->i_pages and removes folios passed in
 270 * @fbatch from the mapping. The function expects @fbatch to be sorted
 271 * by page index and is optimised for it to be dense.
 272 * It tolerates holes in @fbatch (mapping entries at those indices are not
 273 * modified).
 274 *
 275 * The function expects the i_pages lock to be held.
 276 */
 277static void page_cache_delete_batch(struct address_space *mapping,
 278			     struct folio_batch *fbatch)
 279{
 280	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
 281	long total_pages = 0;
 282	int i = 0;
 283	struct folio *folio;
 284
 285	mapping_set_update(&xas, mapping);
 286	xas_for_each(&xas, folio, ULONG_MAX) {
 287		if (i >= folio_batch_count(fbatch))
 288			break;
 289
 290		/* A swap/dax/shadow entry got inserted? Skip it. */
 291		if (xa_is_value(folio))
 292			continue;
 293		/*
 294		 * A page got inserted in our range? Skip it. We have our
 295		 * pages locked so they are protected from being removed.
 296		 * If we see a page whose index is higher than ours, it
 297		 * means our page has been removed, which shouldn't be
 298		 * possible because we're holding the PageLock.
 299		 */
 300		if (folio != fbatch->folios[i]) {
 301			VM_BUG_ON_FOLIO(folio->index >
 302					fbatch->folios[i]->index, folio);
 303			continue;
 304		}
 305
 306		WARN_ON_ONCE(!folio_test_locked(folio));
 307
 308		folio->mapping = NULL;
 309		/* Leave folio->index set: truncation lookup relies on it */
 310
 311		i++;
 312		xas_store(&xas, NULL);
 313		total_pages += folio_nr_pages(folio);
 314	}
 315	mapping->nrpages -= total_pages;
 316}
 317
 318void delete_from_page_cache_batch(struct address_space *mapping,
 319				  struct folio_batch *fbatch)
 320{
 321	int i;
 322
 323	if (!folio_batch_count(fbatch))
 324		return;
 325
 326	spin_lock(&mapping->host->i_lock);
 327	xa_lock_irq(&mapping->i_pages);
 328	for (i = 0; i < folio_batch_count(fbatch); i++) {
 329		struct folio *folio = fbatch->folios[i];
 330
 331		trace_mm_filemap_delete_from_page_cache(folio);
 332		filemap_unaccount_folio(mapping, folio);
 333	}
 334	page_cache_delete_batch(mapping, fbatch);
 335	xa_unlock_irq(&mapping->i_pages);
 336	if (mapping_shrinkable(mapping))
 337		inode_add_lru(mapping->host);
 338	spin_unlock(&mapping->host->i_lock);
 339
 340	for (i = 0; i < folio_batch_count(fbatch); i++)
 341		filemap_free_folio(mapping, fbatch->folios[i]);
 342}
 343
 344int filemap_check_errors(struct address_space *mapping)
 345{
 346	int ret = 0;
 347	/* Check for outstanding write errors */
 348	if (test_bit(AS_ENOSPC, &mapping->flags) &&
 349	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 350		ret = -ENOSPC;
 351	if (test_bit(AS_EIO, &mapping->flags) &&
 352	    test_and_clear_bit(AS_EIO, &mapping->flags))
 353		ret = -EIO;
 354	return ret;
 355}
 356EXPORT_SYMBOL(filemap_check_errors);
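/*
 * Editor's sketch, not part of the original file: the write-then-check
 * pattern filemap_check_errors() supports — kick writeback, then harvest
 * any AS_EIO/AS_ENOSPC left behind by earlier, possibly asynchronous,
 * writeback.  The example_* name is hypothetical.
 */
static int __maybe_unused example_flush_and_check(struct address_space *mapping)
{
	int err = filemap_fdatawrite(mapping);

	if (err)
		return err;
	return filemap_check_errors(mapping);	/* clears the error bits */
}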
 357
 358static int filemap_check_and_keep_errors(struct address_space *mapping)
 359{
 360	/* Check for outstanding write errors */
 361	if (test_bit(AS_EIO, &mapping->flags))
 362		return -EIO;
 363	if (test_bit(AS_ENOSPC, &mapping->flags))
 364		return -ENOSPC;
 365	return 0;
 366}
 367
 368/**
 369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 370 * @mapping:	address space structure to write
 371 * @wbc:	the writeback_control controlling the writeout
 372 *
 373 * Call writepages on the mapping using the provided wbc to control the
 374 * writeout.
 375 *
 376 * Return: %0 on success, negative error code otherwise.
 377 */
 378int filemap_fdatawrite_wbc(struct address_space *mapping,
 379			   struct writeback_control *wbc)
 380{
 381	int ret;
 382
 383	if (!mapping_can_writeback(mapping) ||
 384	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 385		return 0;
 386
 387	wbc_attach_fdatawrite_inode(wbc, mapping->host);
 388	ret = do_writepages(mapping, wbc);
 389	wbc_detach_inode(wbc);
 390	return ret;
 391}
 392EXPORT_SYMBOL(filemap_fdatawrite_wbc);
 393
 394/**
 395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 396 * @mapping:	address space structure to write
 397 * @start:	offset in bytes where the range starts
 398 * @end:	offset in bytes where the range ends (inclusive)
 399 * @sync_mode:	enable synchronous operation
 400 *
 401 * Start writeback against all of a mapping's dirty pages that lie
 402 * within the byte offsets <start, end> inclusive.
 403 *
 404 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 405 * opposed to a regular memory cleansing writeback.  The difference between
 406 * these two operations is that if a dirty page/buffer is encountered, it must
 407 * be waited upon, and not just skipped over.
 408 *
 409 * Return: %0 on success, negative error code otherwise.
 410 */
 411int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 412				loff_t end, int sync_mode)
 413{
 414	struct writeback_control wbc = {
 415		.sync_mode = sync_mode,
 416		.nr_to_write = LONG_MAX,
 417		.range_start = start,
 418		.range_end = end,
 419	};
 420
 421	return filemap_fdatawrite_wbc(mapping, &wbc);
 422}
 423
 424static inline int __filemap_fdatawrite(struct address_space *mapping,
 425	int sync_mode)
 426{
 427	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 428}
 429
 430int filemap_fdatawrite(struct address_space *mapping)
 431{
 432	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
 433}
 434EXPORT_SYMBOL(filemap_fdatawrite);
 435
 436int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 437				loff_t end)
 438{
 439	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 440}
 441EXPORT_SYMBOL(filemap_fdatawrite_range);
 442
 443/**
 444 * filemap_flush - mostly a non-blocking flush
 445 * @mapping:	target address_space
 446 *
 447 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 448 * purposes - I/O may not be started against all dirty pages.
 449 *
 450 * Return: %0 on success, negative error code otherwise.
 451 */
 452int filemap_flush(struct address_space *mapping)
 453{
 454	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 455}
 456EXPORT_SYMBOL(filemap_flush);
 457
 458/**
 459 * filemap_range_has_page - check if a page exists in range.
 460 * @mapping:           address space within which to check
 461 * @start_byte:        offset in bytes where the range starts
 462 * @end_byte:          offset in bytes where the range ends (inclusive)
 463 *
 464 * Find at least one page in the range supplied, usually used to check if
 465 * direct writing in this range will trigger a writeback.
 466 *
 467 * Return: %true if at least one page exists in the specified range,
 468 * %false otherwise.
 469 */
 470bool filemap_range_has_page(struct address_space *mapping,
 471			   loff_t start_byte, loff_t end_byte)
 472{
 473	struct folio *folio;
 474	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 475	pgoff_t max = end_byte >> PAGE_SHIFT;
 476
 477	if (end_byte < start_byte)
 478		return false;
 479
 480	rcu_read_lock();
 481	for (;;) {
 482		folio = xas_find(&xas, max);
 483		if (xas_retry(&xas, folio))
 484			continue;
 485		/* Shadow entries don't count */
 486		if (xa_is_value(folio))
 487			continue;
 488		/*
 489		 * We don't need to try to pin this page; we're about to
 490		 * release the RCU lock anyway.  It is enough to know that
 491		 * there was a page here recently.
 492		 */
 493		break;
 494	}
 495	rcu_read_unlock();
 496
 497	return folio != NULL;
 498}
 499EXPORT_SYMBOL(filemap_range_has_page);
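/*
 * Editor's sketch, not part of the original file: the direct-I/O style
 * check the comment above alludes to — only flush and wait if cached
 * pages actually overlap the byte range about to be written.  The
 * example_* name is hypothetical.
 */
static int __maybe_unused example_dio_prepare(struct address_space *mapping,
					      loff_t pos, size_t count)
{
	if (!filemap_range_has_page(mapping, pos, pos + count - 1))
		return 0;	/* nothing cached in range, no flush needed */
	return filemap_write_and_wait_range(mapping, pos, pos + count - 1);
}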
 500
 501static void __filemap_fdatawait_range(struct address_space *mapping,
 502				     loff_t start_byte, loff_t end_byte)
 503{
 504	pgoff_t index = start_byte >> PAGE_SHIFT;
 505	pgoff_t end = end_byte >> PAGE_SHIFT;
 506	struct folio_batch fbatch;
 507	unsigned nr_folios;
 508
 509	folio_batch_init(&fbatch);
 510
 511	while (index <= end) {
 512		unsigned i;
 513
 514		nr_folios = filemap_get_folios_tag(mapping, &index, end,
 515				PAGECACHE_TAG_WRITEBACK, &fbatch);
 516
 517		if (!nr_folios)
 518			break;
 519
 520		for (i = 0; i < nr_folios; i++) {
 521			struct folio *folio = fbatch.folios[i];
 522
 523			folio_wait_writeback(folio);
 524			folio_clear_error(folio);
 525		}
 526		folio_batch_release(&fbatch);
 527		cond_resched();
 528	}
 529}
 530
 531/**
 532 * filemap_fdatawait_range - wait for writeback to complete
 533 * @mapping:		address space structure to wait for
 534 * @start_byte:		offset in bytes where the range starts
 535 * @end_byte:		offset in bytes where the range ends (inclusive)
 536 *
 537 * Walk the list of under-writeback pages of the given address space
 538 * in the given range and wait for all of them.  Check error status of
 539 * the address space and return it.
 540 *
 541 * Since the error status of the address space is cleared by this function,
 542 * callers are responsible for checking the return value and handling and/or
 543 * reporting the error.
 544 *
 545 * Return: error status of the address space.
 546 */
 547int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 548			    loff_t end_byte)
 549{
 550	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 551	return filemap_check_errors(mapping);
 552}
 553EXPORT_SYMBOL(filemap_fdatawait_range);
 554
 555/**
 556 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 557 * @mapping:		address space structure to wait for
 558 * @start_byte:		offset in bytes where the range starts
 559 * @end_byte:		offset in bytes where the range ends (inclusive)
 560 *
 561 * Walk the list of under-writeback pages of the given address space in the
 562 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 563 * this function does not clear error status of the address space.
 564 *
 565 * Use this function if callers don't handle errors themselves.  Expected
 566 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 567 * fsfreeze(8)
 568 */
 569int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
 570		loff_t start_byte, loff_t end_byte)
 571{
 572	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 573	return filemap_check_and_keep_errors(mapping);
 574}
 575EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
 576
 577/**
 578 * file_fdatawait_range - wait for writeback to complete
 579 * @file:		file pointing to address space structure to wait for
 580 * @start_byte:		offset in bytes where the range starts
 581 * @end_byte:		offset in bytes where the range ends (inclusive)
 582 *
 583 * Walk the list of under-writeback pages of the address space that file
 584 * refers to, in the given range and wait for all of them.  Check error
 585 * status of the address space vs. the file->f_wb_err cursor and return it.
 586 *
 587 * Since the error status of the file is advanced by this function,
 588 * callers are responsible for checking the return value and handling and/or
 589 * reporting the error.
 590 *
 591 * Return: error status of the address space vs. the file->f_wb_err cursor.
 592 */
 593int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
 594{
 595	struct address_space *mapping = file->f_mapping;
 596
 597	__filemap_fdatawait_range(mapping, start_byte, end_byte);
 598	return file_check_and_advance_wb_err(file);
 599}
 600EXPORT_SYMBOL(file_fdatawait_range);
 601
 602/**
 603 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 604 * @mapping: address space structure to wait for
 605 *
 606 * Walk the list of under-writeback pages of the given address space
 607 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 608 * does not clear error status of the address space.
 609 *
 610 * Use this function if callers don't handle errors themselves.  Expected
 611 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 612 * fsfreeze(8)
 613 *
 614 * Return: error status of the address space.
 615 */
 616int filemap_fdatawait_keep_errors(struct address_space *mapping)
 617{
 618	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
 619	return filemap_check_and_keep_errors(mapping);
 620}
 621EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
 622
 623/* Returns true if writeback might be needed or already in progress. */
 624static bool mapping_needs_writeback(struct address_space *mapping)
 625{
 626	return mapping->nrpages;
 627}
 628
 629bool filemap_range_has_writeback(struct address_space *mapping,
 630				 loff_t start_byte, loff_t end_byte)
 631{
 632	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 633	pgoff_t max = end_byte >> PAGE_SHIFT;
 634	struct folio *folio;
 635
 636	if (end_byte < start_byte)
 637		return false;
 638
 639	rcu_read_lock();
 640	xas_for_each(&xas, folio, max) {
 641		if (xas_retry(&xas, folio))
 642			continue;
 643		if (xa_is_value(folio))
 644			continue;
 645		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
 646				folio_test_writeback(folio))
 647			break;
 648	}
 649	rcu_read_unlock();
 650	return folio != NULL;
 651}
 652EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
 653
 654/**
 655 * filemap_write_and_wait_range - write out & wait on a file range
 656 * @mapping:	the address_space for the pages
 657 * @lstart:	offset in bytes where the range starts
 658 * @lend:	offset in bytes where the range ends (inclusive)
 659 *
 660 * Write out and wait upon file offsets lstart->lend, inclusive.
 661 *
 662 * Note that @lend is inclusive (describes the last byte to be written) so
 663 * that this function can be used to write to the very end-of-file (end = -1).
 664 *
 665 * Return: error status of the address space.
 666 */
 667int filemap_write_and_wait_range(struct address_space *mapping,
 668				 loff_t lstart, loff_t lend)
 669{
 670	int err = 0, err2;
 671
 672	if (lend < lstart)
 673		return 0;
 674
 675	if (mapping_needs_writeback(mapping)) {
 676		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 677						 WB_SYNC_ALL);
 678		/*
 679		 * Even if the above returned error, the pages may be
 680		 * written partially (e.g. -ENOSPC), so we wait for it.
 681		 * But the -EIO is special case, it may indicate the worst
 682		 * thing (e.g. bug) happened, so we avoid waiting for it.
 683		 */
 684		if (err != -EIO)
 685			__filemap_fdatawait_range(mapping, lstart, lend);
 686	}
 687	err2 = filemap_check_errors(mapping);
 688	if (!err)
 689		err = err2;
 690	return err;
 691}
 692EXPORT_SYMBOL(filemap_write_and_wait_range);
 693
 694void __filemap_set_wb_err(struct address_space *mapping, int err)
 695{
 696	errseq_t eseq = errseq_set(&mapping->wb_err, err);
 697
 698	trace_filemap_set_wb_err(mapping, eseq);
 699}
 700EXPORT_SYMBOL(__filemap_set_wb_err);
 701
 702/**
 703 * file_check_and_advance_wb_err - report any wb error that was previously
 704 * 				   recorded and advance wb_err to the current one
 705 * @file: struct file on which the error is being reported
 706 *
 707 * When userland calls fsync (or something like nfsd does the equivalent), we
 708 * want to report any writeback errors that occurred since the last fsync (or
 709 * since the file was opened if there haven't been any).
 710 *
 711 * Grab the wb_err from the mapping. If it matches what we have in the file,
 712 * then just quickly return 0. The file is all caught up.
 713 *
 714 * If it doesn't match, then take the mapping value, set the "seen" flag in
 715 * it and try to swap it into place. If it works, or another task beat us
 716 * to it with the new value, then update the f_wb_err and return the error
 717 * portion. The error at this point must be reported via proper channels
 718 * (a'la fsync, or NFS COMMIT operation, etc.).
 719 *
 720 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 721 * value is protected by the f_lock since we must ensure that it reflects
 722 * the latest value swapped in for this file descriptor.
 723 *
 724 * Return: %0 on success, negative error code otherwise.
 725 */
 726int file_check_and_advance_wb_err(struct file *file)
 727{
 728	int err = 0;
 729	errseq_t old = READ_ONCE(file->f_wb_err);
 730	struct address_space *mapping = file->f_mapping;
 731
 732	/* Locklessly handle the common case where nothing has changed */
 733	if (errseq_check(&mapping->wb_err, old)) {
 734		/* Something changed, must use slow path */
 735		spin_lock(&file->f_lock);
 736		old = file->f_wb_err;
 737		err = errseq_check_and_advance(&mapping->wb_err,
 738						&file->f_wb_err);
 739		trace_file_check_and_advance_wb_err(file, old);
 740		spin_unlock(&file->f_lock);
 741	}
 742
 743	/*
 744	 * We're mostly using this function as a drop in replacement for
 745	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
 746	 * that the legacy code would have had on these flags.
 747	 */
 748	clear_bit(AS_EIO, &mapping->flags);
 749	clear_bit(AS_ENOSPC, &mapping->flags);
 750	return err;
 751}
 752EXPORT_SYMBOL(file_check_and_advance_wb_err);
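/*
 * Editor's sketch, not part of the original file: the shape of an
 * ->fsync() built on the errseq machinery above.  Writing back the range
 * with file_write_and_wait_range() (below) already advances f_wb_err via
 * file_check_and_advance_wb_err(), so any error missed by this file
 * descriptor is reported exactly once.  The example_* name is
 * hypothetical.
 */
static int __maybe_unused example_fsync(struct file *file, loff_t start,
					loff_t end, int datasync)
{
	int err = file_write_and_wait_range(file, start, end);

	/* filesystem-specific metadata flushing would go here */
	return err;
}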
 753
 754/**
 755 * file_write_and_wait_range - write out & wait on a file range
 756 * @file:	file pointing to address_space with pages
 757 * @lstart:	offset in bytes where the range starts
 758 * @lend:	offset in bytes where the range ends (inclusive)
 759 *
 760 * Write out and wait upon file offsets lstart->lend, inclusive.
 761 *
 762 * Note that @lend is inclusive (describes the last byte to be written) so
 763 * that this function can be used to write to the very end-of-file (end = -1).
 764 *
 765 * After writing out and waiting on the data, we check and advance the
 766 * f_wb_err cursor to the latest value, and return any errors detected there.
 767 *
 768 * Return: %0 on success, negative error code otherwise.
 769 */
 770int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
 771{
 772	int err = 0, err2;
 773	struct address_space *mapping = file->f_mapping;
 774
 775	if (lend < lstart)
 776		return 0;
 777
 778	if (mapping_needs_writeback(mapping)) {
 779		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 780						 WB_SYNC_ALL);
 781		/* See comment in filemap_write_and_wait_range() */
 782		if (err != -EIO)
 783			__filemap_fdatawait_range(mapping, lstart, lend);
 784	}
 785	err2 = file_check_and_advance_wb_err(file);
 786	if (!err)
 787		err = err2;
 788	return err;
 789}
 790EXPORT_SYMBOL(file_write_and_wait_range);
 791
 792/**
 793 * replace_page_cache_folio - replace a pagecache folio with a new one
 794 * @old:	folio to be replaced
 795 * @new:	folio to replace with
 796 *
 797 * This function replaces a folio in the pagecache with a new one.  On
 798 * success it acquires the pagecache reference for the new folio and
 799 * drops it for the old folio.  Both the old and new folios must be
 800 * locked.  This function does not add the new folio to the LRU, the
 801 * caller must do that.
 802 *
 803 * The remove + add is atomic.  This function cannot fail.
 804 */
 805void replace_page_cache_folio(struct folio *old, struct folio *new)
 806{
 807	struct address_space *mapping = old->mapping;
 808	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
 809	pgoff_t offset = old->index;
 810	XA_STATE(xas, &mapping->i_pages, offset);
 811
 812	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
 813	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
 814	VM_BUG_ON_FOLIO(new->mapping, new);
 815
 816	folio_get(new);
 817	new->mapping = mapping;
 818	new->index = offset;
 819
 820	mem_cgroup_replace_folio(old, new);
 821
 822	xas_lock_irq(&xas);
 823	xas_store(&xas, new);
 824
 825	old->mapping = NULL;
 826	/* hugetlb pages do not participate in page cache accounting. */
 827	if (!folio_test_hugetlb(old))
 828		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
 829	if (!folio_test_hugetlb(new))
 830		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
 831	if (folio_test_swapbacked(old))
 832		__lruvec_stat_sub_folio(old, NR_SHMEM);
 833	if (folio_test_swapbacked(new))
 834		__lruvec_stat_add_folio(new, NR_SHMEM);
 835	xas_unlock_irq(&xas);
 836	if (free_folio)
 837		free_folio(old);
 838	folio_put(old);
 839}
 840EXPORT_SYMBOL_GPL(replace_page_cache_folio);
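/*
 * Editor's sketch, not part of the original file: the caller's side of a
 * replacement — both folios locked across the atomic swap, after which
 * only @new remains in the page cache and the caller is still
 * responsible for adding it to the LRU.  The example_* name is
 * hypothetical.
 */
static void __maybe_unused example_replace(struct folio *old, struct folio *new)
{
	folio_lock(old);
	folio_lock(new);
	replace_page_cache_folio(old, new);
	folio_unlock(new);
	folio_unlock(old);
}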
 841
 842noinline int __filemap_add_folio(struct address_space *mapping,
 843		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
 844{
 845	XA_STATE(xas, &mapping->i_pages, index);
 846	int huge = folio_test_hugetlb(folio);
 847	bool charged = false;
 848	long nr = 1;
 849
 850	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 851	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
 852	mapping_set_update(&xas, mapping);
 853
 854	if (!huge) {
 855		int error = mem_cgroup_charge(folio, NULL, gfp);
 856		if (error)
 857			return error;
 858		charged = true;
 859	}
 860
 861	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 862	xas_set_order(&xas, index, folio_order(folio));
 863	nr = folio_nr_pages(folio);
 864
 865	gfp &= GFP_RECLAIM_MASK;
 866	folio_ref_add(folio, nr);
 867	folio->mapping = mapping;
 868	folio->index = xas.xa_index;
 869
 870	do {
 871		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
 872		void *entry, *old = NULL;
 873
 874		if (order > folio_order(folio))
 875			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
 876					order, gfp);
 877		xas_lock_irq(&xas);
 878		xas_for_each_conflict(&xas, entry) {
 879			old = entry;
 880			if (!xa_is_value(entry)) {
 881				xas_set_err(&xas, -EEXIST);
 882				goto unlock;
 883			}
 884		}
 885
 886		if (old) {
 887			if (shadowp)
 888				*shadowp = old;
 889			/* entry may have been split before we acquired lock */
 890			order = xa_get_order(xas.xa, xas.xa_index);
 891			if (order > folio_order(folio)) {
 892				/* How to handle large swap entries? */
 893				BUG_ON(shmem_mapping(mapping));
 894				xas_split(&xas, old, order);
 895				xas_reset(&xas);
 896			}
 897		}
 898
 899		xas_store(&xas, folio);
 900		if (xas_error(&xas))
 901			goto unlock;
 902
 903		mapping->nrpages += nr;
 904
 905		/* hugetlb pages do not participate in page cache accounting */
 906		if (!huge) {
 907			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
 908			if (folio_test_pmd_mappable(folio))
 909				__lruvec_stat_mod_folio(folio,
 910						NR_FILE_THPS, nr);
 911		}
 912unlock:
 913		xas_unlock_irq(&xas);
 914	} while (xas_nomem(&xas, gfp));
 915
 916	if (xas_error(&xas))
 917		goto error;
 918
 919	trace_mm_filemap_add_to_page_cache(folio);
 920	return 0;
 921error:
 922	if (charged)
 923		mem_cgroup_uncharge(folio);
 924	folio->mapping = NULL;
 925	/* Leave page->index set: truncation relies upon it */
 926	folio_put_refs(folio, nr);
 927	return xas_error(&xas);
 928}
 929ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 930
 931int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 932				pgoff_t index, gfp_t gfp)
 933{
 934	void *shadow = NULL;
 935	int ret;
 936
 937	__folio_set_locked(folio);
 938	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
 939	if (unlikely(ret))
 940		__folio_clear_locked(folio);
 941	else {
 942		/*
 943		 * The folio might have been evicted from cache only
 944		 * recently, in which case it should be activated like
 945		 * any other repeatedly accessed folio.
 946		 * The exception is folios getting rewritten; evicting other
 947		 * data from the working set, only to cache data that will
 948		 * get overwritten with something else, is a waste of memory.
 949		 */
 950		WARN_ON_ONCE(folio_test_active(folio));
 951		if (!(gfp & __GFP_WRITE) && shadow)
 952			workingset_refault(folio, shadow);
 953		folio_add_lru(folio);
 954	}
 955	return ret;
 956}
 957EXPORT_SYMBOL_GPL(filemap_add_folio);
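/*
 * Editor's sketch, not part of the original file: a read-path style
 * insertion — allocate a folio, try to add it at @index, and fall back
 * to looking up whichever folio won the race on failure (commonly
 * -EEXIST).  On success the folio is returned locked and on the LRU.
 * The example_* name is hypothetical.
 */
static struct folio *__maybe_unused example_grab_folio(
		struct address_space *mapping, pgoff_t index, gfp_t gfp)
{
	struct folio *folio = filemap_alloc_folio(gfp, 0);

	if (!folio)
		return ERR_PTR(-ENOMEM);
	if (!filemap_add_folio(mapping, folio, index, gfp))
		return folio;				/* locked, on the LRU */
	folio_put(folio);
	return filemap_lock_folio(mapping, index);	/* lost the race */
}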
 958
 959#ifdef CONFIG_NUMA
 960struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
 961{
 962	int n;
 963	struct folio *folio;
 964
 965	if (cpuset_do_page_mem_spread()) {
 966		unsigned int cpuset_mems_cookie;
 967		do {
 968			cpuset_mems_cookie = read_mems_allowed_begin();
 969			n = cpuset_mem_spread_node();
 970			folio = __folio_alloc_node(gfp, order, n);
 971		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
 972
 973		return folio;
 974	}
 975	return folio_alloc(gfp, order);
 976}
 977EXPORT_SYMBOL(filemap_alloc_folio);
 978#endif
 979
 980/*
 981 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 982 *
 983 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 984 *
 985 * @mapping1: the first mapping to lock
 986 * @mapping2: the second mapping to lock
 987 */
 988void filemap_invalidate_lock_two(struct address_space *mapping1,
 989				 struct address_space *mapping2)
 990{
 991	if (mapping1 > mapping2)
 992		swap(mapping1, mapping2);
 993	if (mapping1)
 994		down_write(&mapping1->invalidate_lock);
 995	if (mapping2 && mapping1 != mapping2)
 996		down_write_nested(&mapping2->invalidate_lock, 1);
 997}
 998EXPORT_SYMBOL(filemap_invalidate_lock_two);
 999
1000/*
1001 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1002 *
1003 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1004 *
1005 * @mapping1: the first mapping to unlock
1006 * @mapping2: the second mapping to unlock
1007 */
1008void filemap_invalidate_unlock_two(struct address_space *mapping1,
1009				   struct address_space *mapping2)
1010{
1011	if (mapping1)
1012		up_write(&mapping1->invalidate_lock);
1013	if (mapping2 && mapping1 != mapping2)
1014		up_write(&mapping2->invalidate_lock);
1015}
1016EXPORT_SYMBOL(filemap_invalidate_unlock_two);
1017
1018/*
1019 * In order to wait for pages to become available there must be
1020 * waitqueues associated with pages. By using a hash table of
1021 * waitqueues where the bucket discipline is to maintain all
1022 * waiters on the same queue and wake all when any of the pages
1023 * become available, and for the woken contexts to check to be
1024 * sure the appropriate page became available, this saves space
1025 * at a cost of "thundering herd" phenomena during rare hash
1026 * collisions.
1027 */
1028#define PAGE_WAIT_TABLE_BITS 8
1029#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1030static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1031
1032static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1033{
1034	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1035}
1036
1037void __init pagecache_init(void)
1038{
1039	int i;
1040
1041	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1042		init_waitqueue_head(&folio_wait_table[i]);
1043
1044	page_writeback_init();
1045}
1046
1047/*
1048 * The page wait code treats the "wait->flags" somewhat unusually, because
1049 * we have multiple different kinds of waits, not just the usual "exclusive"
1050 * one.
1051 *
1052 * We have:
1053 *
1054 *  (a) no special bits set:
1055 *
1056 *	We're just waiting for the bit to be released, and when a waker
1057 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1058 *	and remove it from the wait queue.
1059 *
1060 *	Simple and straightforward.
1061 *
1062 *  (b) WQ_FLAG_EXCLUSIVE:
1063 *
1064 *	The waiter is waiting to get the lock, and only one waiter should
1065 *	be woken up to avoid any thundering herd behavior. We'll set the
1066 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1067 *
1068 *	This is the traditional exclusive wait.
1069 *
1070 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1071 *
1072 *	The waiter is waiting to get the bit, and additionally wants the
1073 *	lock to be transferred to it for fair lock behavior. If the lock
1074 *	cannot be taken, we stop walking the wait queue without waking
1075 *	the waiter.
1076 *
1077 *	This is the "fair lock handoff" case, and in addition to setting
1078 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1079 *	that it now has the lock.
1080 */
1081static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1082{
1083	unsigned int flags;
1084	struct wait_page_key *key = arg;
1085	struct wait_page_queue *wait_page
1086		= container_of(wait, struct wait_page_queue, wait);
1087
1088	if (!wake_page_match(wait_page, key))
1089		return 0;
1090
1091	/*
1092	 * If it's a lock handoff wait, we get the bit for it, and
1093	 * stop walking (and do not wake it up) if we can't.
1094	 */
1095	flags = wait->flags;
1096	if (flags & WQ_FLAG_EXCLUSIVE) {
1097		if (test_bit(key->bit_nr, &key->folio->flags))
1098			return -1;
1099		if (flags & WQ_FLAG_CUSTOM) {
1100			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1101				return -1;
1102			flags |= WQ_FLAG_DONE;
1103		}
1104	}
1105
1106	/*
1107	 * We are holding the wait-queue lock, but the waiter that
1108	 * is waiting for this will be checking the flags without
1109	 * any locking.
1110	 *
1111	 * So update the flags atomically, and wake up the waiter
1112	 * afterwards to avoid any races. This store-release pairs
1113	 * with the load-acquire in folio_wait_bit_common().
1114	 */
1115	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1116	wake_up_state(wait->private, mode);
1117
1118	/*
1119	 * Ok, we have successfully done what we're waiting for,
1120	 * and we can unconditionally remove the wait entry.
1121	 *
1122	 * Note that this pairs with the "finish_wait()" in the
1123	 * waiter, and has to be the absolute last thing we do.
1124	 * After this list_del_init(&wait->entry) the wait entry
1125	 * might be de-allocated and the process might even have
1126	 * exited.
1127	 */
1128	list_del_init_careful(&wait->entry);
1129	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1130}
1131
1132static void folio_wake_bit(struct folio *folio, int bit_nr)
1133{
1134	wait_queue_head_t *q = folio_waitqueue(folio);
1135	struct wait_page_key key;
1136	unsigned long flags;
1137
1138	key.folio = folio;
1139	key.bit_nr = bit_nr;
1140	key.page_match = 0;
1141
1142	spin_lock_irqsave(&q->lock, flags);
1143	__wake_up_locked_key(q, TASK_NORMAL, &key);
1144
1145	/*
1146	 * It's possible to miss clearing waiters here, when we woke our page
1147	 * waiters, but the hashed waitqueue has waiters for other pages on it.
1148	 * That's okay, it's a rare case. The next waker will clear it.
1149	 *
1150	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1151	 * other), the flag may be cleared in the course of freeing the page;
1152	 * but that is not required for correctness.
1153	 */
1154	if (!waitqueue_active(q) || !key.page_match)
1155		folio_clear_waiters(folio);
1156
1157	spin_unlock_irqrestore(&q->lock, flags);
1158}
1159
1160/*
1161 * A choice of three behaviors for folio_wait_bit_common():
1162 */
1163enum behavior {
1164	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
1165			 * __folio_lock() waiting on then setting PG_locked.
1166			 */
1167	SHARED,		/* Hold ref to page and check the bit when woken, like
1168			 * folio_wait_writeback() waiting on PG_writeback.
1169			 */
1170	DROP,		/* Drop ref to page before wait, no check when woken,
1171			 * like folio_put_wait_locked() on PG_locked.
1172			 */
1173};
1174
1175/*
1176 * Attempt to check (or get) the folio flag, and mark us done
1177 * if successful.
1178 */
1179static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1180					struct wait_queue_entry *wait)
1181{
1182	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1183		if (test_and_set_bit(bit_nr, &folio->flags))
1184			return false;
1185	} else if (test_bit(bit_nr, &folio->flags))
1186		return false;
1187
1188	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1189	return true;
1190}
1191
1192/* How many times do we accept lock stealing from under a waiter? */
1193int sysctl_page_lock_unfairness = 5;
1194
1195static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1196		int state, enum behavior behavior)
1197{
1198	wait_queue_head_t *q = folio_waitqueue(folio);
1199	int unfairness = sysctl_page_lock_unfairness;
1200	struct wait_page_queue wait_page;
1201	wait_queue_entry_t *wait = &wait_page.wait;
1202	bool thrashing = false;
1203	unsigned long pflags;
1204	bool in_thrashing;
1205
1206	if (bit_nr == PG_locked &&
1207	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1208		delayacct_thrashing_start(&in_thrashing);
1209		psi_memstall_enter(&pflags);
1210		thrashing = true;
1211	}
1212
1213	init_wait(wait);
1214	wait->func = wake_page_function;
1215	wait_page.folio = folio;
1216	wait_page.bit_nr = bit_nr;
1217
1218repeat:
1219	wait->flags = 0;
1220	if (behavior == EXCLUSIVE) {
1221		wait->flags = WQ_FLAG_EXCLUSIVE;
1222		if (--unfairness < 0)
1223			wait->flags |= WQ_FLAG_CUSTOM;
1224	}
1225
1226	/*
1227	 * Do one last check whether we can get the
1228	 * page bit synchronously.
1229	 *
1230	 * Do the folio_set_waiters() marking before that
1231	 * to let any waker we _just_ missed know they
1232	 * need to wake us up (otherwise they'll never
1233	 * even go to the slow case that looks at the
1234	 * page queue), and add ourselves to the wait
1235	 * queue if we need to sleep.
1236	 *
1237	 * This part needs to be done under the queue
1238	 * lock to avoid races.
1239	 */
1240	spin_lock_irq(&q->lock);
1241	folio_set_waiters(folio);
1242	if (!folio_trylock_flag(folio, bit_nr, wait))
1243		__add_wait_queue_entry_tail(q, wait);
1244	spin_unlock_irq(&q->lock);
1245
1246	/*
1247	 * From now on, all the logic will be based on
1248	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1249	 * see whether the page bit testing has already
1250	 * been done by the wake function.
1251	 *
1252	 * We can drop our reference to the folio.
1253	 */
1254	if (behavior == DROP)
1255		folio_put(folio);
1256
1257	/*
1258	 * Note that until the "finish_wait()", or until
1259	 * we see the WQ_FLAG_WOKEN flag, we need to
1260	 * be very careful with the 'wait->flags', because
1261	 * we may race with a waker that sets them.
1262	 */
1263	for (;;) {
1264		unsigned int flags;
1265
1266		set_current_state(state);
1267
1268		/* Loop until we've been woken or interrupted */
1269		flags = smp_load_acquire(&wait->flags);
1270		if (!(flags & WQ_FLAG_WOKEN)) {
1271			if (signal_pending_state(state, current))
1272				break;
1273
1274			io_schedule();
1275			continue;
1276		}
1277
1278		/* If we were non-exclusive, we're done */
1279		if (behavior != EXCLUSIVE)
1280			break;
1281
1282		/* If the waker got the lock for us, we're done */
1283		if (flags & WQ_FLAG_DONE)
1284			break;
1285
1286		/*
1287		 * Otherwise, if we're getting the lock, we need to
1288		 * try to get it ourselves.
1289		 *
1290		 * And if that fails, we'll have to retry this all.
1291		 */
1292		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1293			goto repeat;
1294
1295		wait->flags |= WQ_FLAG_DONE;
1296		break;
1297	}
1298
1299	/*
1300	 * If a signal happened, this 'finish_wait()' may remove the last
1301	 * waiter from the wait-queues, but the folio waiters bit will remain
1302	 * set. That's ok. The next wakeup will take care of it, and trying
1303	 * to do it here would be difficult and prone to races.
1304	 */
1305	finish_wait(q, wait);
1306
1307	if (thrashing) {
1308		delayacct_thrashing_end(&in_thrashing);
1309		psi_memstall_leave(&pflags);
1310	}
1311
1312	/*
1313	 * NOTE! The wait->flags weren't stable until we've done the
1314	 * 'finish_wait()', and we could have exited the loop above due
1315	 * to a signal, and had a wakeup event happen after the signal
1316	 * test but before the 'finish_wait()'.
1317	 *
1318	 * So only after the finish_wait() can we reliably determine
1319	 * if we got woken up or not, so we can now figure out the final
1320	 * return value based on that state without races.
1321	 *
1322	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1323	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1324	 */
1325	if (behavior == EXCLUSIVE)
1326		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1327
1328	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1329}
1330
1331#ifdef CONFIG_MIGRATION
1332/**
1333 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1334 * @entry: migration swap entry.
1335 * @ptl: already locked ptl. This function will drop the lock.
1336 *
1337 * Wait for a migration entry referencing the given page to be removed. This is
1338 * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1339 * this can be called without taking a reference on the page. Instead this
1340 * should be called while holding the ptl for the migration entry referencing
1341 * the page.
1342 *
1343 * Returns after unlocking the ptl.
1344 *
1345 * This follows the same logic as folio_wait_bit_common() so see the comments
1346 * there.
1347 */
1348void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
1349	__releases(ptl)
1350{
1351	struct wait_page_queue wait_page;
1352	wait_queue_entry_t *wait = &wait_page.wait;
1353	bool thrashing = false;
1354	unsigned long pflags;
1355	bool in_thrashing;
1356	wait_queue_head_t *q;
1357	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1358
1359	q = folio_waitqueue(folio);
1360	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1361		delayacct_thrashing_start(&in_thrashing);
1362		psi_memstall_enter(&pflags);
1363		thrashing = true;
1364	}
1365
1366	init_wait(wait);
1367	wait->func = wake_page_function;
1368	wait_page.folio = folio;
1369	wait_page.bit_nr = PG_locked;
1370	wait->flags = 0;
1371
1372	spin_lock_irq(&q->lock);
1373	folio_set_waiters(folio);
1374	if (!folio_trylock_flag(folio, PG_locked, wait))
1375		__add_wait_queue_entry_tail(q, wait);
1376	spin_unlock_irq(&q->lock);
1377
1378	/*
1379	 * If a migration entry exists for the page the migration path must hold
1380	 * a valid reference to the page, and it must take the ptl to remove the
1381	 * migration entry. So the page is valid until the ptl is dropped.
1382	 */
1383	spin_unlock(ptl);
1384
1385	for (;;) {
1386		unsigned int flags;
1387
1388		set_current_state(TASK_UNINTERRUPTIBLE);
1389
1390		/* Loop until we've been woken or interrupted */
1391		flags = smp_load_acquire(&wait->flags);
1392		if (!(flags & WQ_FLAG_WOKEN)) {
1393			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1394				break;
1395
1396			io_schedule();
1397			continue;
1398		}
1399		break;
1400	}
1401
1402	finish_wait(q, wait);
1403
1404	if (thrashing) {
1405		delayacct_thrashing_end(&in_thrashing);
1406		psi_memstall_leave(&pflags);
1407	}
1408}
1409#endif
1410
1411void folio_wait_bit(struct folio *folio, int bit_nr)
1412{
1413	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1414}
1415EXPORT_SYMBOL(folio_wait_bit);
1416
1417int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1418{
1419	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1420}
1421EXPORT_SYMBOL(folio_wait_bit_killable);
1422
1423/**
1424 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1425 * @folio: The folio to wait for.
1426 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1427 *
1428 * The caller should hold a reference on @folio.  They expect the page to
1429 * become unlocked relatively soon, but do not wish to hold up migration
1430 * (for example) by holding the reference while waiting for the folio to
1431 * come unlocked.  After this function returns, the caller should not
1432 * dereference @folio.
1433 *
1434 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1435 */
1436static int folio_put_wait_locked(struct folio *folio, int state)
1437{
1438	return folio_wait_bit_common(folio, PG_locked, state, DROP);
1439}
1440
1441/**
1442 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1443 * @folio: Folio defining the wait queue of interest
1444 * @waiter: Waiter to add to the queue
1445 *
1446 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1447 */
1448void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1449{
1450	wait_queue_head_t *q = folio_waitqueue(folio);
1451	unsigned long flags;
1452
1453	spin_lock_irqsave(&q->lock, flags);
1454	__add_wait_queue_entry_tail(q, waiter);
1455	folio_set_waiters(folio);
1456	spin_unlock_irqrestore(&q->lock, flags);
1457}
1458EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1459
1460/**
1461 * folio_unlock - Unlock a locked folio.
1462 * @folio: The folio.
1463 *
1464 * Unlocks the folio and wakes up any thread sleeping on the page lock.
1465 *
1466 * Context: May be called from interrupt or process context.  May not be
1467 * called from NMI context.
1468 */
1469void folio_unlock(struct folio *folio)
1470{
1471	/* Bit 7 allows x86 to check the byte's sign bit */
1472	BUILD_BUG_ON(PG_waiters != 7);
1473	BUILD_BUG_ON(PG_locked > 7);
1474	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1475	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
1476		folio_wake_bit(folio, PG_locked);
1477}
1478EXPORT_SYMBOL(folio_unlock);
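/*
 * Editor's sketch, not part of the original file: the usual pairing —
 * try the lock cheaply, sleep for it on contention (ending up in
 * __folio_lock() below), and let folio_unlock() wake any sleepers.
 * The example_* name is hypothetical.
 */
static void __maybe_unused example_locked_work(struct folio *folio)
{
	if (!folio_trylock(folio))	/* fast path failed ... */
		folio_lock(folio);	/* ... so sleep for the lock */

	/* the folio is locked here; safe to operate on it */

	folio_unlock(folio);
}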
1479
1480/**
1481 * folio_end_read - End read on a folio.
1482 * @folio: The folio.
1483 * @success: True if all reads completed successfully.
1484 *
1485 * When all reads against a folio have completed, filesystems should
1486 * call this function to let the pagecache know that no more reads
1487 * are outstanding.  This will unlock the folio and wake up any thread
1488 * sleeping on the lock.  The folio will also be marked uptodate if all
1489 * reads succeeded.
1490 *
1491 * Context: May be called from interrupt or process context.  May not be
1492 * called from NMI context.
1493 */
1494void folio_end_read(struct folio *folio, bool success)
1495{
1496	unsigned long mask = 1 << PG_locked;
1497
1498	/* Must be in bottom byte for x86 to work */
1499	BUILD_BUG_ON(PG_uptodate > 7);
1500	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1501	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
1502
1503	if (likely(success))
1504		mask |= 1 << PG_uptodate;
1505	if (folio_xor_flags_has_waiters(folio, mask))
1506		folio_wake_bit(folio, PG_locked);
1507}
1508EXPORT_SYMBOL(folio_end_read);
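/*
 * Editor's sketch, not part of the original file: a filesystem read
 * completion using folio_end_read() — one call replaces the older
 * folio_mark_uptodate() + folio_unlock() pair with a single atomic
 * flag update.  The example_* name is hypothetical.
 */
static void __maybe_unused example_read_done(struct folio *folio, int error)
{
	folio_end_read(folio, error == 0);
}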
1509
1510/**
1511 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1512 * @folio: The folio.
1513 *
1514 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1515 * it.  The folio reference held for PG_private_2 being set is released.
1516 *
1517 * This is, for example, used when a netfs folio is being written to a local
1518 * disk cache, thereby allowing writes to the cache for the same folio to be
1519 * serialised.
1520 */
1521void folio_end_private_2(struct folio *folio)
1522{
1523	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1524	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1525	folio_wake_bit(folio, PG_private_2);
1526	folio_put(folio);
1527}
1528EXPORT_SYMBOL(folio_end_private_2);
1529
1530/**
1531 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1532 * @folio: The folio to wait on.
1533 *
1534 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1535 */
1536void folio_wait_private_2(struct folio *folio)
1537{
1538	while (folio_test_private_2(folio))
1539		folio_wait_bit(folio, PG_private_2);
1540}
1541EXPORT_SYMBOL(folio_wait_private_2);
1542
1543/**
1544 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1545 * @folio: The folio to wait on.
1546 *
1547 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1548 * fatal signal is received by the calling task.
1549 *
1550 * Return:
1551 * - 0 if successful.
1552 * - -EINTR if a fatal signal was encountered.
1553 */
1554int folio_wait_private_2_killable(struct folio *folio)
1555{
1556	int ret = 0;
1557
1558	while (folio_test_private_2(folio)) {
1559		ret = folio_wait_bit_killable(folio, PG_private_2);
1560		if (ret < 0)
1561			break;
1562	}
1563
1564	return ret;
1565}
1566EXPORT_SYMBOL(folio_wait_private_2_killable);
1567
1568/**
1569 * folio_end_writeback - End writeback against a folio.
1570 * @folio: The folio.
1571 *
1572 * The folio must actually be under writeback.
1573 *
1574 * Context: May be called from process or interrupt context.
1575 */
1576void folio_end_writeback(struct folio *folio)
1577{
1578	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
1579
1580	/*
1581	 * folio_test_clear_reclaim() could be used here but it is an
1582	 * atomic operation and overkill in this particular case. Failing
1583	 * to shuffle a folio marked for immediate reclaim is too mild
1584	 * a gain to justify taking an atomic operation penalty at the
1585	 * end of every folio writeback.
1586	 */
1587	if (folio_test_reclaim(folio)) {
1588		folio_clear_reclaim(folio);
1589		folio_rotate_reclaimable(folio);
1590	}
1591
1592	/*
1593	 * Writeback does not hold a folio reference of its own, relying
1594	 * on truncation to wait for the clearing of PG_writeback.
1595	 * But here we must make sure that the folio is not freed and
1596	 * reused before the folio_wake_bit().
1597	 */
1598	folio_get(folio);
1599	if (__folio_end_writeback(folio))
1600		folio_wake_bit(folio, PG_writeback);
1601	acct_reclaim_writeback(folio);
1602	folio_put(folio);
1603}
1604EXPORT_SYMBOL(folio_end_writeback);
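/*
 * Editor's sketch, not part of the original file: the writeback
 * lifecycle around folio_end_writeback() — the submission path sets
 * PG_writeback, the I/O completion path clears it and wakes anyone in
 * folio_wait_writeback().  The example_* name is hypothetical.
 */
static void __maybe_unused example_writeback_cycle(struct folio *folio)
{
	folio_start_writeback(folio);	/* before submitting the I/O */
	folio_unlock(folio);

	/* ... asynchronous I/O completes; its endio path then does: */

	folio_end_writeback(folio);
}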
1605
1606/**
1607 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1608 * @folio: The folio to lock
1609 */
1610void __folio_lock(struct folio *folio)
1611{
1612	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1613				EXCLUSIVE);
1614}
1615EXPORT_SYMBOL(__folio_lock);
1616
1617int __folio_lock_killable(struct folio *folio)
1618{
1619	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1620					EXCLUSIVE);
1621}
1622EXPORT_SYMBOL_GPL(__folio_lock_killable);
1623
1624static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1625{
1626	struct wait_queue_head *q = folio_waitqueue(folio);
1627	int ret;
1628
1629	wait->folio = folio;
1630	wait->bit_nr = PG_locked;
1631
1632	spin_lock_irq(&q->lock);
1633	__add_wait_queue_entry_tail(q, &wait->wait);
1634	folio_set_waiters(folio);
1635	ret = !folio_trylock(folio);
1636	/*
1637	 * If we were successful now, we know we're still on the
1638	 * waitqueue as we're still under the lock. This means it's
1639	 * safe to remove and return success, we know the callback
1640	 * isn't going to trigger.
1641	 */
1642	if (!ret)
1643		__remove_wait_queue(q, &wait->wait);
1644	else
1645		ret = -EIOCBQUEUED;
1646	spin_unlock_irq(&q->lock);
1647	return ret;
1648}
1649
1650/*
1651 * Return values:
1652 * 0 - folio is locked.
1653 * non-zero - folio is not locked.
1654 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
1655 *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
1656 *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
1657 *
1658 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
1659 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1660 */
1661vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1662{
1663	unsigned int flags = vmf->flags;
1664
1665	if (fault_flag_allow_retry_first(flags)) {
1666		/*
1667		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
1668		 * released even though returning VM_FAULT_RETRY.
1669		 */
1670		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1671			return VM_FAULT_RETRY;
1672
1673		release_fault_lock(vmf);
1674		if (flags & FAULT_FLAG_KILLABLE)
1675			folio_wait_locked_killable(folio);
1676		else
1677			folio_wait_locked(folio);
1678		return VM_FAULT_RETRY;
1679	}
1680	if (flags & FAULT_FLAG_KILLABLE) {
1681		bool ret;
1682
1683		ret = __folio_lock_killable(folio);
1684		if (ret) {
1685			release_fault_lock(vmf);
1686			return VM_FAULT_RETRY;
1687		}
1688	} else {
1689		__folio_lock(folio);
1690	}
1691
1692	return 0;
1693}
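/*
 * Illustrative sketch (not part of filemap.c): fault handlers normally
 * reach the function above through the folio_lock_or_retry() wrapper in
 * <linux/pagemap.h> and must stop on VM_FAULT_RETRY, because the
 * mmap_lock or per-VMA lock may already have been dropped.  The helper
 * below is hypothetical, modelled on the do_swap_page() pattern.
 */
static vm_fault_t example_lock_faulted_folio(struct folio *folio,
					     struct vm_fault *vmf)
{
	vm_fault_t ret = folio_lock_or_retry(folio, vmf);

	if (ret & VM_FAULT_RETRY) {
		folio_put(folio);
		return ret;
	}
	/* The folio is now locked; carry on with the fault. */
	return 0;
}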
1694
1695/**
1696 * page_cache_next_miss() - Find the next gap in the page cache.
1697 * @mapping: Mapping.
1698 * @index: Index.
1699 * @max_scan: Maximum range to search.
1700 *
1701 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1702 * gap with the lowest index.
1703 *
1704 * This function may be called under the rcu_read_lock.  However, this will
1705 * not atomically search a snapshot of the cache at a single point in time.
1706 * For example, if a gap is created at index 5, then subsequently a gap is
1707 * created at index 10, page_cache_next_miss covering both indices may
1708 * return 10 if called under the rcu_read_lock.
1709 *
1710 * Return: The index of the gap if found, otherwise an index outside the
1711 * range specified (in which case 'return - index >= max_scan' will be true).
1712 * In the rare case of index wrap-around, 0 will be returned.
1713 */
1714pgoff_t page_cache_next_miss(struct address_space *mapping,
1715			     pgoff_t index, unsigned long max_scan)
1716{
1717	XA_STATE(xas, &mapping->i_pages, index);
1718
1719	while (max_scan--) {
1720		void *entry = xas_next(&xas);
1721		if (!entry || xa_is_value(entry))
1722			break;
1723		if (xas.xa_index == 0)
1724			break;
1725	}
1726
1727	return xas.xa_index;
1728}
1729EXPORT_SYMBOL(page_cache_next_miss);
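/*
 * Illustrative sketch (not part of filemap.c): a readahead-style check
 * that the next @max_scan page-cache slots are all populated, built on
 * the gap semantics documented above.  The helper name is hypothetical.
 */
static bool example_range_fully_cached(struct address_space *mapping,
				       pgoff_t index, unsigned long max_scan)
{
	pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);

	/* A gap outside [index, index + max_scan) means no miss was found. */
	return gap - index >= max_scan;
}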
1730
1731/**
1732 * page_cache_prev_miss() - Find the previous gap in the page cache.
1733 * @mapping: Mapping.
1734 * @index: Index.
1735 * @max_scan: Maximum range to search.
1736 *
1737 * Search the range [max(index - max_scan + 1, 0), index] for the
1738 * gap with the highest index.
1739 *
1740 * This function may be called under the rcu_read_lock.  However, this will
1741 * not atomically search a snapshot of the cache at a single point in time.
1742 * For example, if a gap is created at index 10, then subsequently a gap is
1743 * created at index 5, page_cache_prev_miss() covering both indices may
1744 * return 5 if called under the rcu_read_lock.
1745 *
1746 * Return: The index of the gap if found, otherwise an index outside the
1747 * range specified (in which case 'index - return >= max_scan' will be true).
1748 * In the rare case of wrap-around, ULONG_MAX will be returned.
1749 */
1750pgoff_t page_cache_prev_miss(struct address_space *mapping,
1751			     pgoff_t index, unsigned long max_scan)
1752{
1753	XA_STATE(xas, &mapping->i_pages, index);
1754
1755	while (max_scan--) {
1756		void *entry = xas_prev(&xas);
1757		if (!entry || xa_is_value(entry))
1758			break;
1759		if (xas.xa_index == ULONG_MAX)
1760			break;
1761	}
1762
1763	return xas.xa_index;
1764}
1765EXPORT_SYMBOL(page_cache_prev_miss);
1766
1767/*
1768 * Lockless page cache protocol:
1769 * On the lookup side:
1770 * 1. Load the folio from i_pages
1771 * 2. Increment the refcount if it's not zero
1772 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1773 *
1774 * On the removal side:
1775 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1776 * B. Remove the page from i_pages
1777 * C. Return the page to the page allocator
1778 *
1779 * This means that any page may have its reference count temporarily
1780 * increased by a speculative page cache (or fast GUP) lookup as it can
1781 * be allocated by another user before the RCU grace period expires.
1782 * Because the refcount temporarily acquired here may end up being the
1783 * last refcount on the page, any page allocation must be freeable by
1784 * folio_put().
1785 */
1786
1787/*
1788 * filemap_get_entry - Get a page cache entry.
1789 * @mapping: the address_space to search
1790 * @index: The page cache index.
1791 *
1792 * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1793 * it is returned with an increased refcount.  If it is a shadow entry
1794 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1795 * it is returned without further action.
1796 *
1797 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1798 */
1799void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
1800{
1801	XA_STATE(xas, &mapping->i_pages, index);
1802	struct folio *folio;
1803
1804	rcu_read_lock();
1805repeat:
1806	xas_reset(&xas);
1807	folio = xas_load(&xas);
1808	if (xas_retry(&xas, folio))
1809		goto repeat;
1810	/*
1811	 * A shadow entry of a recently evicted page, or a swap entry from
1812	 * shmem/tmpfs.  Return it without attempting to raise page count.
1813	 */
1814	if (!folio || xa_is_value(folio))
1815		goto out;
1816
1817	if (!folio_try_get_rcu(folio))
1818		goto repeat;
1819
1820	if (unlikely(folio != xas_reload(&xas))) {
1821		folio_put(folio);
1822		goto repeat;
1823	}
1824out:
1825	rcu_read_unlock();
1826
1827	return folio;
1828}
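/*
 * Illustrative sketch (not part of filemap.c): a caller of
 * filemap_get_entry() has to handle all three outcomes - NULL, a value
 * entry (shadow or swap), or a real folio carrying an extra reference.
 * The helper name is hypothetical.
 */
static void example_inspect_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry = filemap_get_entry(mapping, index);
	struct folio *folio;

	if (!entry)
		return;		/* nothing cached at @index */
	if (xa_is_value(entry))
		return;		/* shadow/swap entry, no reference held */

	/* A real folio: we own a reference and must drop it when done. */
	folio = entry;
	folio_put(folio);
}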
1829
1830/**
1831 * __filemap_get_folio - Find and get a reference to a folio.
1832 * @mapping: The address_space to search.
1833 * @index: The page index.
1834 * @fgp_flags: %FGP flags modify how the folio is returned.
1835 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1836 *
1837 * Looks up the page cache entry at @mapping & @index.
1838 *
1839 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1840 * if the %GFP flags specified for %FGP_CREAT are atomic.
1841 *
1842 * If this function returns a folio, it is returned with an increased refcount.
1843 *
1844 * Return: The found folio or an ERR_PTR() otherwise.
1845 */
1846struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1847		fgf_t fgp_flags, gfp_t gfp)
1848{
1849	struct folio *folio;
1850
1851repeat:
1852	folio = filemap_get_entry(mapping, index);
1853	if (xa_is_value(folio))
1854		folio = NULL;
1855	if (!folio)
1856		goto no_page;
1857
1858	if (fgp_flags & FGP_LOCK) {
1859		if (fgp_flags & FGP_NOWAIT) {
1860			if (!folio_trylock(folio)) {
1861				folio_put(folio);
1862				return ERR_PTR(-EAGAIN);
1863			}
1864		} else {
1865			folio_lock(folio);
1866		}
1867
1868		/* Has the page been truncated? */
1869		if (unlikely(folio->mapping != mapping)) {
1870			folio_unlock(folio);
1871			folio_put(folio);
1872			goto repeat;
1873		}
1874		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1875	}
1876
1877	if (fgp_flags & FGP_ACCESSED)
1878		folio_mark_accessed(folio);
1879	else if (fgp_flags & FGP_WRITE) {
1880		/* Clear idle flag for buffer write */
1881		if (folio_test_idle(folio))
1882			folio_clear_idle(folio);
1883	}
1884
1885	if (fgp_flags & FGP_STABLE)
1886		folio_wait_stable(folio);
1887no_page:
1888	if (!folio && (fgp_flags & FGP_CREAT)) {
1889		unsigned order = FGF_GET_ORDER(fgp_flags);
1890		int err;
1891
1892		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1893			gfp |= __GFP_WRITE;
1894		if (fgp_flags & FGP_NOFS)
1895			gfp &= ~__GFP_FS;
1896		if (fgp_flags & FGP_NOWAIT) {
1897			gfp &= ~GFP_KERNEL;
1898			gfp |= GFP_NOWAIT | __GFP_NOWARN;
1899		}
1900		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1901			fgp_flags |= FGP_LOCK;
1902
1903		if (!mapping_large_folio_support(mapping))
1904			order = 0;
1905		if (order > MAX_PAGECACHE_ORDER)
1906			order = MAX_PAGECACHE_ORDER;
1907		/* If we're not aligned, allocate a smaller folio */
1908		if (index & ((1UL << order) - 1))
1909			order = __ffs(index);
1910
1911		do {
1912			gfp_t alloc_gfp = gfp;
1913
1914			err = -ENOMEM;
1915			if (order == 1)
1916				order = 0;
1917			if (order > 0)
1918				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
1919			folio = filemap_alloc_folio(alloc_gfp, order);
1920			if (!folio)
1921				continue;
1922
1923			/* Init accessed to avoid an atomic mark_page_accessed() later */
1924			if (fgp_flags & FGP_ACCESSED)
1925				__folio_set_referenced(folio);
1926
1927			err = filemap_add_folio(mapping, folio, index, gfp);
1928			if (!err)
1929				break;
1930			folio_put(folio);
1931			folio = NULL;
1932		} while (order-- > 0);
1933
1934		if (err == -EEXIST)
1935			goto repeat;
1936		if (err)
1937			return ERR_PTR(err);
1938		/*
1939		 * filemap_add_folio locks the page, and for mmap
1940		 * we expect an unlocked page.
1941		 */
1942		if (folio && (fgp_flags & FGP_FOR_MMAP))
1943			folio_unlock(folio);
1944	}
1945
1946	if (!folio)
1947		return ERR_PTR(-ENOENT);
1948	return folio;
1949}
1950EXPORT_SYMBOL(__filemap_get_folio);
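/*
 * Illustrative sketch (not part of filemap.c): the common "find or
 * create, locked" pattern built on __filemap_get_folio(), equivalent to
 * what the filemap_grab_folio() wrapper does.  Error handling is reduced
 * to a NULL return and the helper name is hypothetical.
 */
static struct folio *example_grab_locked_folio(struct address_space *mapping,
					       pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return NULL;

	/* Returned locked and with an elevated refcount. */
	return folio;
}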
1951
1952static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1953		xa_mark_t mark)
1954{
1955	struct folio *folio;
1956
1957retry:
1958	if (mark == XA_PRESENT)
1959		folio = xas_find(xas, max);
1960	else
1961		folio = xas_find_marked(xas, max, mark);
1962
1963	if (xas_retry(xas, folio))
1964		goto retry;
1965	/*
1966	 * A shadow entry of a recently evicted page, a swap
1967	 * entry from shmem/tmpfs or a DAX entry.  Return it
1968	 * without attempting to raise page count.
1969	 */
1970	if (!folio || xa_is_value(folio))
1971		return folio;
1972
1973	if (!folio_try_get_rcu(folio))
1974		goto reset;
1975
1976	if (unlikely(folio != xas_reload(xas))) {
1977		folio_put(folio);
1978		goto reset;
1979	}
1980
1981	return folio;
1982reset:
1983	xas_reset(xas);
1984	goto retry;
1985}
1986
1987/**
1988 * find_get_entries - gang pagecache lookup
1989 * @mapping:	The address_space to search
1990 * @start:	The starting page cache index
1991 * @end:	The final page index (inclusive).
1992 * @fbatch:	Where the resulting entries are placed.
1993 * @indices:	The cache indices corresponding to the entries in @fbatch
1994 *
1995 * find_get_entries() will search for and return a batch of entries in
1996 * the mapping.  The entries are placed in @fbatch.  find_get_entries()
1997 * takes a reference on any actual folios it returns.
1998 *
1999 * The entries have ascending indexes.  The indices may not be consecutive
2000 * due to not-present entries or large folios.
2001 *
2002 * Any shadow entries of evicted folios, or swap entries from
2003 * shmem/tmpfs, are included in the returned array.
2004 *
2005 * Return: The number of entries which were found.
2006 */
2007unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2008		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2009{
2010	XA_STATE(xas, &mapping->i_pages, *start);
2011	struct folio *folio;
2012
2013	rcu_read_lock();
2014	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2015		indices[fbatch->nr] = xas.xa_index;
2016		if (!folio_batch_add(fbatch, folio))
2017			break;
2018	}
2019	rcu_read_unlock();
2020
2021	if (folio_batch_count(fbatch)) {
2022		unsigned long nr = 1;
2023		int idx = folio_batch_count(fbatch) - 1;
2024
2025		folio = fbatch->folios[idx];
2026		if (!xa_is_value(folio))
2027			nr = folio_nr_pages(folio);
2028		*start = indices[idx] + nr;
2029	}
2030	return folio_batch_count(fbatch);
2031}
2032
2033/**
2034 * find_lock_entries - Find a batch of pagecache entries.
2035 * @mapping:	The address_space to search.
2036 * @start:	The starting page cache index.
2037 * @end:	The final page index (inclusive).
2038 * @fbatch:	Where the resulting entries are placed.
2039 * @indices:	The cache indices of the entries in @fbatch.
2040 *
2041 * find_lock_entries() will return a batch of entries from @mapping.
2042 * Swap, shadow and DAX entries are included.  Folios are returned
2043 * locked and with an incremented refcount.  Folios which are locked
2044 * by somebody else or under writeback are skipped.  Folios which are
2045 * partially outside the range are not returned.
2046 *
2047 * The entries have ascending indexes.  The indices may not be consecutive
2048 * due to not-present entries, large folios, folios which could not be
2049 * locked or folios under writeback.
2050 *
2051 * Return: The number of entries which were found.
2052 */
2053unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2054		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2055{
2056	XA_STATE(xas, &mapping->i_pages, *start);
2057	struct folio *folio;
2058
2059	rcu_read_lock();
2060	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2061		if (!xa_is_value(folio)) {
2062			if (folio->index < *start)
2063				goto put;
2064			if (folio_next_index(folio) - 1 > end)
2065				goto put;
2066			if (!folio_trylock(folio))
2067				goto put;
2068			if (folio->mapping != mapping ||
2069			    folio_test_writeback(folio))
2070				goto unlock;
2071			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2072					folio);
2073		}
2074		indices[fbatch->nr] = xas.xa_index;
2075		if (!folio_batch_add(fbatch, folio))
2076			break;
2077		continue;
2078unlock:
2079		folio_unlock(folio);
2080put:
2081		folio_put(folio);
2082	}
2083	rcu_read_unlock();
2084
2085	if (folio_batch_count(fbatch)) {
2086		unsigned long nr = 1;
2087		int idx = folio_batch_count(fbatch) - 1;
2088
2089		folio = fbatch->folios[idx];
2090		if (!xa_is_value(folio))
2091			nr = folio_nr_pages(folio);
2092		*start = indices[idx] + nr;
2093	}
2094	return folio_batch_count(fbatch);
2095}
2096
2097/**
2098 * filemap_get_folios - Get a batch of folios
2099 * @mapping:	The address_space to search
2100 * @start:	The starting page index
2101 * @end:	The final page index (inclusive)
2102 * @fbatch:	The batch to fill.
2103 *
2104 * Search for and return a batch of folios in the mapping starting at
2105 * index @start and up to index @end (inclusive).  The folios are returned
2106 * in @fbatch with an elevated reference count.
2107 *
2108 * Return: The number of folios which were found.
2109 * We also update @start to index the next folio for the traversal.
2110 */
2111unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2112		pgoff_t end, struct folio_batch *fbatch)
2113{
2114	return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
2115}
2116EXPORT_SYMBOL(filemap_get_folios);
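/*
 * Illustrative sketch (not part of filemap.c): the usual batched walk
 * over a range with filemap_get_folios().  Each batch is released before
 * the next lookup; the helper name is hypothetical.
 */
static void example_walk_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			/* ... operate on fbatch.folios[i] here ... */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}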
2117
2118/**
2119 * filemap_get_folios_contig - Get a batch of contiguous folios
2120 * @mapping:	The address_space to search
2121 * @start:	The starting page index
2122 * @end:	The final page index (inclusive)
2123 * @fbatch:	The batch to fill
2124 *
2125 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2126 * except the returned folios are guaranteed to be contiguous. This may
2127 * not return all contiguous folios if the batch gets filled up.
2128 *
2129 * Return: The number of folios found.
2130 * Also update @start to be positioned for traversal of the next folio.
2131 */
2132
2133unsigned filemap_get_folios_contig(struct address_space *mapping,
2134		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2135{
2136	XA_STATE(xas, &mapping->i_pages, *start);
2137	unsigned long nr;
2138	struct folio *folio;
2139
2140	rcu_read_lock();
2141
2142	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2143			folio = xas_next(&xas)) {
2144		if (xas_retry(&xas, folio))
2145			continue;
2146		/*
2147		 * If the entry has been swapped out, we can stop looking.
2148		 * No current caller is looking for DAX entries.
2149		 */
2150		if (xa_is_value(folio))
2151			goto update_start;
2152
2153		if (!folio_try_get_rcu(folio))
2154			goto retry;
2155
2156		if (unlikely(folio != xas_reload(&xas)))
2157			goto put_folio;
2158
2159		if (!folio_batch_add(fbatch, folio)) {
2160			nr = folio_nr_pages(folio);
2161			*start = folio->index + nr;
2162			goto out;
2163		}
2164		continue;
2165put_folio:
2166		folio_put(folio);
2167
2168retry:
2169		xas_reset(&xas);
2170	}
2171
2172update_start:
2173	nr = folio_batch_count(fbatch);
2174
2175	if (nr) {
2176		folio = fbatch->folios[nr - 1];
2177		*start = folio_next_index(folio);
2178	}
2179out:
2180	rcu_read_unlock();
2181	return folio_batch_count(fbatch);
2182}
2183EXPORT_SYMBOL(filemap_get_folios_contig);
2184
2185/**
2186 * filemap_get_folios_tag - Get a batch of folios matching @tag
2187 * @mapping:    The address_space to search
2188 * @start:      The starting page index
2189 * @end:        The final page index (inclusive)
2190 * @tag:        The tag index
2191 * @fbatch:     The batch to fill
2192 *
2193 * The first folio may start before @start; if it does, it will contain
2194 * @start.  The final folio may extend beyond @end; if it does, it will
2195 * contain @end.  The folios have ascending indices.  There may be gaps
2196 * between the folios if there are indices which have no folio in the
2197 * page cache.  If folios are added to or removed from the page cache
2198 * while this is running, they may or may not be found by this call.
2199 * Only returns folios that are tagged with @tag.
2200 *
2201 * Return: The number of folios found.
2202 * Also update @start to index the next folio for traversal.
2203 */
2204unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
2205			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2206{
2207	XA_STATE(xas, &mapping->i_pages, *start);
2208	struct folio *folio;
2209
2210	rcu_read_lock();
2211	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2212		/*
2213		 * Shadow entries should never be tagged, but this iteration
2214		 * is lockless so there is a window for page reclaim to evict
2215		 * a page we saw tagged. Skip over it.
2216		 */
2217		if (xa_is_value(folio))
2218			continue;
2219		if (!folio_batch_add(fbatch, folio)) {
2220			unsigned long nr = folio_nr_pages(folio);
2221			*start = folio->index + nr;
2222			goto out;
2223		}
2224	}
2225	/*
2226	 * We come here when there is no page beyond @end. We take care not to
2227	 * overflow the index @start as it confuses some of the callers. This
2228	 * breaks the iteration when there is a page at index -1, but that is
2229	 * already broken anyway.
2230	 */
2231	if (end == (pgoff_t)-1)
2232		*start = (pgoff_t)-1;
2233	else
2234		*start = end + 1;
2235out:
2236	rcu_read_unlock();
2237
2238	return folio_batch_count(fbatch);
2239}
2240EXPORT_SYMBOL(filemap_get_folios_tag);
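/*
 * Illustrative sketch (not part of filemap.c): the writeback-style loop
 * over dirty-tagged folios that filemap_get_folios_tag() is intended
 * for.  The helper name is hypothetical and the actual write-out work is
 * elided.
 */
static void example_walk_dirty_folios(struct address_space *mapping,
				      pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &start, end,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... start write-back of the folio here ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}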
2241
2242/*
2243 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2244 * a _large_ part of the i/o request. Imagine the worst scenario:
2245 *
2246 *      ---R__________________________________________B__________
2247 *         ^ reading here                             ^ bad block(assume 4k)
2248 *
2249 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2250 * => failing the whole request => read(R) => read(R+1) =>
2251 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2252 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2253 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2254 *
2255 * It is going insane. Fix it by quickly scaling down the readahead size.
2256 */
2257static void shrink_readahead_size_eio(struct file_ra_state *ra)
2258{
2259	ra->ra_pages /= 4;
2260}
2261
2262/*
2263 * filemap_get_read_batch - Get a batch of folios for read
2264 *
2265 * Get a batch of folios which represent a contiguous range of bytes in
2266 * the file.  No exceptional entries will be returned.  If @index is in
2267 * the middle of a folio, the entire folio will be returned.  The last
2268 * folio in the batch may have the readahead flag set or the uptodate flag
2269 * clear so that the caller can take the appropriate action.
2270 */
2271static void filemap_get_read_batch(struct address_space *mapping,
2272		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2273{
2274	XA_STATE(xas, &mapping->i_pages, index);
2275	struct folio *folio;
2276
2277	rcu_read_lock();
2278	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2279		if (xas_retry(&xas, folio))
2280			continue;
2281		if (xas.xa_index > max || xa_is_value(folio))
2282			break;
2283		if (xa_is_sibling(folio))
2284			break;
2285		if (!folio_try_get_rcu(folio))
2286			goto retry;
2287
2288		if (unlikely(folio != xas_reload(&xas)))
2289			goto put_folio;
2290
2291		if (!folio_batch_add(fbatch, folio))
2292			break;
2293		if (!folio_test_uptodate(folio))
2294			break;
2295		if (folio_test_readahead(folio))
2296			break;
2297		xas_advance(&xas, folio_next_index(folio) - 1);
2298		continue;
2299put_folio:
2300		folio_put(folio);
2301retry:
2302		xas_reset(&xas);
2303	}
2304	rcu_read_unlock();
2305}
2306
2307static int filemap_read_folio(struct file *file, filler_t filler,
2308		struct folio *folio)
2309{
2310	bool workingset = folio_test_workingset(folio);
2311	unsigned long pflags;
2312	int error;
2313
2314	/*
2315	 * A previous I/O error may have been due to temporary failures,
2316	 * eg. multipath errors.  PG_error will be set again if read_folio
2317	 * fails.
2318	 */
2319	folio_clear_error(folio);
2320
2321	/* Start the actual read. The read will unlock the page. */
2322	if (unlikely(workingset))
2323		psi_memstall_enter(&pflags);
2324	error = filler(file, folio);
2325	if (unlikely(workingset))
2326		psi_memstall_leave(&pflags);
2327	if (error)
2328		return error;
2329
2330	error = folio_wait_locked_killable(folio);
2331	if (error)
2332		return error;
2333	if (folio_test_uptodate(folio))
2334		return 0;
2335	if (file)
2336		shrink_readahead_size_eio(&file->f_ra);
2337	return -EIO;
2338}
2339
2340static bool filemap_range_uptodate(struct address_space *mapping,
2341		loff_t pos, size_t count, struct folio *folio,
2342		bool need_uptodate)
2343{
2344	if (folio_test_uptodate(folio))
2345		return true;
2346	/* pipes can't handle partially uptodate pages */
2347	if (need_uptodate)
2348		return false;
2349	if (!mapping->a_ops->is_partially_uptodate)
2350		return false;
2351	if (mapping->host->i_blkbits >= folio_shift(folio))
2352		return false;
2353
2354	if (folio_pos(folio) > pos) {
2355		count -= folio_pos(folio) - pos;
2356		pos = 0;
2357	} else {
2358		pos -= folio_pos(folio);
2359	}
2360
2361	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2362}
2363
2364static int filemap_update_page(struct kiocb *iocb,
2365		struct address_space *mapping, size_t count,
2366		struct folio *folio, bool need_uptodate)
2367{
2368	int error;
2369
2370	if (iocb->ki_flags & IOCB_NOWAIT) {
2371		if (!filemap_invalidate_trylock_shared(mapping))
2372			return -EAGAIN;
2373	} else {
2374		filemap_invalidate_lock_shared(mapping);
2375	}
2376
2377	if (!folio_trylock(folio)) {
2378		error = -EAGAIN;
2379		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2380			goto unlock_mapping;
2381		if (!(iocb->ki_flags & IOCB_WAITQ)) {
2382			filemap_invalidate_unlock_shared(mapping);
2383			/*
2384			 * This is where we usually end up waiting for a
2385			 * previously submitted readahead to finish.
2386			 */
2387			folio_put_wait_locked(folio, TASK_KILLABLE);
2388			return AOP_TRUNCATED_PAGE;
2389		}
2390		error = __folio_lock_async(folio, iocb->ki_waitq);
2391		if (error)
2392			goto unlock_mapping;
2393	}
2394
2395	error = AOP_TRUNCATED_PAGE;
2396	if (!folio->mapping)
2397		goto unlock;
2398
2399	error = 0;
2400	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2401				   need_uptodate))
2402		goto unlock;
2403
2404	error = -EAGAIN;
2405	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2406		goto unlock;
2407
2408	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2409			folio);
2410	goto unlock_mapping;
2411unlock:
2412	folio_unlock(folio);
2413unlock_mapping:
2414	filemap_invalidate_unlock_shared(mapping);
2415	if (error == AOP_TRUNCATED_PAGE)
2416		folio_put(folio);
2417	return error;
2418}
2419
2420static int filemap_create_folio(struct file *file,
2421		struct address_space *mapping, pgoff_t index,
2422		struct folio_batch *fbatch)
2423{
2424	struct folio *folio;
2425	int error;
2426
2427	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2428	if (!folio)
2429		return -ENOMEM;
2430
2431	/*
2432	 * Protect against truncate / hole punch. Grabbing invalidate_lock
2433	 * here ensures we cannot instantiate and bring uptodate new
2434	 * pagecache folios after evicting page cache during truncate
2435	 * and before actually freeing blocks.	Note that we could
2436	 * release invalidate_lock after inserting the folio into
2437	 * the page cache as the locked folio would then be enough to
2438	 * synchronize with hole punching. But there are code paths
2439	 * such as filemap_update_page() filling in partially uptodate
2440	 * pages or ->readahead() that need to hold invalidate_lock
2441	 * while mapping blocks for IO so let's hold the lock here as
2442	 * well to keep locking rules simple.
2443	 */
2444	filemap_invalidate_lock_shared(mapping);
2445	error = filemap_add_folio(mapping, folio, index,
2446			mapping_gfp_constraint(mapping, GFP_KERNEL));
2447	if (error == -EEXIST)
2448		error = AOP_TRUNCATED_PAGE;
2449	if (error)
2450		goto error;
2451
2452	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2453	if (error)
2454		goto error;
2455
2456	filemap_invalidate_unlock_shared(mapping);
2457	folio_batch_add(fbatch, folio);
2458	return 0;
2459error:
2460	filemap_invalidate_unlock_shared(mapping);
2461	folio_put(folio);
2462	return error;
2463}
2464
2465static int filemap_readahead(struct kiocb *iocb, struct file *file,
2466		struct address_space *mapping, struct folio *folio,
2467		pgoff_t last_index)
2468{
2469	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2470
2471	if (iocb->ki_flags & IOCB_NOIO)
2472		return -EAGAIN;
2473	page_cache_async_ra(&ractl, folio, last_index - folio->index);
2474	return 0;
2475}
2476
2477static int filemap_get_pages(struct kiocb *iocb, size_t count,
2478		struct folio_batch *fbatch, bool need_uptodate)
2479{
2480	struct file *filp = iocb->ki_filp;
2481	struct address_space *mapping = filp->f_mapping;
2482	struct file_ra_state *ra = &filp->f_ra;
2483	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2484	pgoff_t last_index;
2485	struct folio *folio;
2486	int err = 0;
2487
2488	/* "last_index" is the index of the page beyond the end of the read */
2489	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
2490retry:
2491	if (fatal_signal_pending(current))
2492		return -EINTR;
2493
2494	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2495	if (!folio_batch_count(fbatch)) {
2496		if (iocb->ki_flags & IOCB_NOIO)
2497			return -EAGAIN;
2498		page_cache_sync_readahead(mapping, ra, filp, index,
2499				last_index - index);
2500		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2501	}
2502	if (!folio_batch_count(fbatch)) {
2503		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2504			return -EAGAIN;
2505		err = filemap_create_folio(filp, mapping,
2506				iocb->ki_pos >> PAGE_SHIFT, fbatch);
2507		if (err == AOP_TRUNCATED_PAGE)
2508			goto retry;
2509		return err;
2510	}
2511
2512	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2513	if (folio_test_readahead(folio)) {
2514		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2515		if (err)
2516			goto err;
2517	}
2518	if (!folio_test_uptodate(folio)) {
2519		if ((iocb->ki_flags & IOCB_WAITQ) &&
2520		    folio_batch_count(fbatch) > 1)
2521			iocb->ki_flags |= IOCB_NOWAIT;
2522		err = filemap_update_page(iocb, mapping, count, folio,
2523					  need_uptodate);
2524		if (err)
2525			goto err;
2526	}
2527
2528	return 0;
2529err:
2530	if (err < 0)
2531		folio_put(folio);
2532	if (likely(--fbatch->nr))
2533		return 0;
2534	if (err == AOP_TRUNCATED_PAGE)
2535		goto retry;
2536	return err;
2537}
2538
2539static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2540{
2541	unsigned int shift = folio_shift(folio);
2542
2543	return (pos1 >> shift == pos2 >> shift);
2544}
2545
2546/**
2547 * filemap_read - Read data from the page cache.
2548 * @iocb: The iocb to read.
2549 * @iter: Destination for the data.
2550 * @already_read: Number of bytes already read by the caller.
2551 *
2552 * Copies data from the page cache.  If the data is not currently present,
2553 * uses the readahead and read_folio address_space operations to fetch it.
2554 *
2555 * Return: Total number of bytes copied, including those already read by
2556 * the caller.  If an error happens before any bytes are copied, returns
2557 * a negative error number.
2558 */
2559ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2560		ssize_t already_read)
2561{
2562	struct file *filp = iocb->ki_filp;
2563	struct file_ra_state *ra = &filp->f_ra;
2564	struct address_space *mapping = filp->f_mapping;
2565	struct inode *inode = mapping->host;
2566	struct folio_batch fbatch;
2567	int i, error = 0;
2568	bool writably_mapped;
2569	loff_t isize, end_offset;
2570	loff_t last_pos = ra->prev_pos;
2571
2572	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2573		return 0;
2574	if (unlikely(!iov_iter_count(iter)))
2575		return 0;
2576
2577	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2578	folio_batch_init(&fbatch);
2579
2580	do {
2581		cond_resched();
2582
2583		/*
2584		 * If we've already successfully copied some data, then we
2585		 * can no longer safely return -EIOCBQUEUED. Hence mark
2586		 * an async read NOWAIT at that point.
2587		 */
2588		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2589			iocb->ki_flags |= IOCB_NOWAIT;
2590
2591		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2592			break;
2593
2594		error = filemap_get_pages(iocb, iter->count, &fbatch, false);
2595		if (error < 0)
2596			break;
2597
2598		/*
2599		 * i_size must be checked after we know the pages are Uptodate.
2600		 *
2601		 * Checking i_size after the uptodate check allows us to calculate
2602		 * the correct value for "nr", which means the zero-filled
2603		 * part of the page is not copied back to userspace (unless
2604		 * another truncate extends the file - this is desired though).
2605		 */
2606		isize = i_size_read(inode);
2607		if (unlikely(iocb->ki_pos >= isize))
2608			goto put_folios;
2609		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2610
2611		/*
2612		 * Pairs with a barrier in
2613		 * block_write_end()->mark_buffer_dirty() or other page
2614		 * dirtying routines like iomap_write_end() to ensure
2615		 * changes to page contents are visible before we see
2616		 * increased inode size.
2617		 */
2618		smp_rmb();
2619
2620		/*
2621		 * Once we start copying data, we don't want to be touching any
2622		 * cachelines that might be contended:
2623		 */
2624		writably_mapped = mapping_writably_mapped(mapping);
2625
2626		/*
2627		 * When a read accesses the same folio several times, only
2628		 * mark it as accessed the first time.
2629		 */
2630		if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
2631				    fbatch.folios[0]))
2632			folio_mark_accessed(fbatch.folios[0]);
2633
2634		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2635			struct folio *folio = fbatch.folios[i];
2636			size_t fsize = folio_size(folio);
2637			size_t offset = iocb->ki_pos & (fsize - 1);
2638			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2639					     fsize - offset);
2640			size_t copied;
2641
2642			if (end_offset < folio_pos(folio))
2643				break;
2644			if (i > 0)
2645				folio_mark_accessed(folio);
2646			/*
2647			 * If users can be writing to this folio using arbitrary
2648			 * virtual addresses, take care of potential aliasing
2649			 * before reading the folio on the kernel side.
2650			 */
2651			if (writably_mapped)
2652				flush_dcache_folio(folio);
2653
2654			copied = copy_folio_to_iter(folio, offset, bytes, iter);
2655
2656			already_read += copied;
2657			iocb->ki_pos += copied;
2658			last_pos = iocb->ki_pos;
2659
2660			if (copied < bytes) {
2661				error = -EFAULT;
2662				break;
2663			}
2664		}
2665put_folios:
2666		for (i = 0; i < folio_batch_count(&fbatch); i++)
2667			folio_put(fbatch.folios[i]);
2668		folio_batch_init(&fbatch);
2669	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2670
2671	file_accessed(filp);
2672	ra->prev_pos = last_pos;
2673	return already_read ? already_read : error;
2674}
2675EXPORT_SYMBOL_GPL(filemap_read);
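/*
 * Illustrative sketch (not part of filemap.c): a minimal ->read_iter()
 * for a filesystem that only ever does buffered I/O and can therefore
 * call filemap_read() directly.  The function name is hypothetical.
 */
static ssize_t example_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* Nothing has been copied yet, so already_read starts at 0. */
	return filemap_read(iocb, to, 0);
}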
2676
2677int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
2678{
2679	struct address_space *mapping = iocb->ki_filp->f_mapping;
2680	loff_t pos = iocb->ki_pos;
2681	loff_t end = pos + count - 1;
2682
2683	if (iocb->ki_flags & IOCB_NOWAIT) {
2684		if (filemap_range_needs_writeback(mapping, pos, end))
2685			return -EAGAIN;
2686		return 0;
2687	}
2688
2689	return filemap_write_and_wait_range(mapping, pos, end);
2690}
2691EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
2692
2693int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
2694{
2695	struct address_space *mapping = iocb->ki_filp->f_mapping;
2696	loff_t pos = iocb->ki_pos;
2697	loff_t end = pos + count - 1;
2698	int ret;
2699
2700	if (iocb->ki_flags & IOCB_NOWAIT) {
2701		/* we could block if there are any pages in the range */
2702		if (filemap_range_has_page(mapping, pos, end))
2703			return -EAGAIN;
2704	} else {
2705		ret = filemap_write_and_wait_range(mapping, pos, end);
2706		if (ret)
2707			return ret;
2708	}
2709
2710	/*
2711	 * After a write we want buffered reads to be sure to go to disk to get
2712	 * the new data.  We invalidate clean cached pages from the region we're
2713	 * about to write.  We do this *before* the write so that we can return
2714	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2715	 */
2716	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
2717					     end >> PAGE_SHIFT);
2718}
2719EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
2720
2721/**
2722 * generic_file_read_iter - generic filesystem read routine
2723 * @iocb:	kernel I/O control block
2724 * @iter:	destination for the data read
2725 *
2726 * This is the "read_iter()" routine for all filesystems
2727 * that can use the page cache directly.
2728 *
2729 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2730 * be returned when no data can be read without waiting for I/O requests
2731 * to complete; it doesn't prevent readahead.
2732 *
2733 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2734 * requests shall be made for the read or for readahead.  When no data
2735 * can be read, -EAGAIN shall be returned.  When readahead would be
2736 * triggered, a partial, possibly empty read shall be returned.
2737 *
2738 * Return:
2739 * * number of bytes copied, even for partial reads
2740 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2741 */
2742ssize_t
2743generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2744{
2745	size_t count = iov_iter_count(iter);
2746	ssize_t retval = 0;
2747
2748	if (!count)
2749		return 0; /* skip atime */
2750
2751	if (iocb->ki_flags & IOCB_DIRECT) {
2752		struct file *file = iocb->ki_filp;
2753		struct address_space *mapping = file->f_mapping;
2754		struct inode *inode = mapping->host;
2755
2756		retval = kiocb_write_and_wait(iocb, count);
2757		if (retval < 0)
2758			return retval;
2759		file_accessed(file);
2760
2761		retval = mapping->a_ops->direct_IO(iocb, iter);
2762		if (retval >= 0) {
2763			iocb->ki_pos += retval;
2764			count -= retval;
2765		}
2766		if (retval != -EIOCBQUEUED)
2767			iov_iter_revert(iter, count - iov_iter_count(iter));
2768
2769		/*
2770		 * Btrfs can have a short DIO read if we encounter
2771		 * compressed extents, so if there was an error, or if
2772		 * we've already read everything we wanted to, or if
2773		 * there was a short read because we hit EOF, go ahead
2774		 * and return.  Otherwise fallthrough to buffered io for
2775		 * the rest of the read.  Buffered reads will not work for
2776		 * DAX files, so don't bother trying.
2777		 */
2778		if (retval < 0 || !count || IS_DAX(inode))
2779			return retval;
2780		if (iocb->ki_pos >= i_size_read(inode))
2781			return retval;
2782	}
2783
2784	return filemap_read(iocb, iter, retval);
2785}
2786EXPORT_SYMBOL(generic_file_read_iter);
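/*
 * Illustrative sketch (not part of filemap.c): generic_file_read_iter()
 * is normally wired straight into a filesystem's file_operations; the
 * structure below is a hypothetical example, not any real filesystem.
 */
static const struct file_operations example_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= filemap_splice_read,
};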
2787
2788/*
2789 * Splice subpages from a folio into a pipe.
2790 */
2791size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
2792			      struct folio *folio, loff_t fpos, size_t size)
2793{
2794	struct page *page;
2795	size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2796
2797	page = folio_page(folio, offset / PAGE_SIZE);
2798	size = min(size, folio_size(folio) - offset);
2799	offset %= PAGE_SIZE;
2800
2801	while (spliced < size &&
2802	       !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2803		struct pipe_buffer *buf = pipe_head_buf(pipe);
2804		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
2805
2806		*buf = (struct pipe_buffer) {
2807			.ops	= &page_cache_pipe_buf_ops,
2808			.page	= page,
2809			.offset	= offset,
2810			.len	= part,
2811		};
2812		folio_get(folio);
2813		pipe->head++;
2814		page++;
2815		spliced += part;
2816		offset = 0;
2817	}
2818
2819	return spliced;
2820}
2821
2822/**
2823 * filemap_splice_read -  Splice data from a file's pagecache into a pipe
2824 * @in: The file to read from
2825 * @ppos: Pointer to the file position to read from
2826 * @pipe: The pipe to splice into
2827 * @len: The amount to splice
2828 * @flags: The SPLICE_F_* flags
2829 *
2830 * This function gets folios from a file's pagecache and splices them into the
2831 * pipe.  Readahead will be called as necessary to fill more folios.  This may
2832 * be used for blockdevs also.
2833 *
2834 * Return: On success, the number of bytes read will be returned and *@ppos
2835 * will be updated if appropriate; 0 will be returned if there is no more data
2836 * to be read; -EAGAIN will be returned if the pipe had no space, and some
2837 * other negative error code will be returned on error.  A short read may occur
2838 * if the pipe has insufficient space, we reach the end of the data or we hit a
2839 * hole.
2840 */
2841ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
2842			    struct pipe_inode_info *pipe,
2843			    size_t len, unsigned int flags)
2844{
2845	struct folio_batch fbatch;
2846	struct kiocb iocb;
2847	size_t total_spliced = 0, used, npages;
2848	loff_t isize, end_offset;
2849	bool writably_mapped;
2850	int i, error = 0;
2851
2852	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
2853		return 0;
2854
2855	init_sync_kiocb(&iocb, in);
2856	iocb.ki_pos = *ppos;
2857
2858	/* Work out how much data we can actually add into the pipe */
2859	used = pipe_occupancy(pipe->head, pipe->tail);
2860	npages = max_t(ssize_t, pipe->max_usage - used, 0);
2861	len = min_t(size_t, len, npages * PAGE_SIZE);
2862
2863	folio_batch_init(&fbatch);
2864
2865	do {
2866		cond_resched();
2867
2868		if (*ppos >= i_size_read(in->f_mapping->host))
2869			break;
2870
2871		iocb.ki_pos = *ppos;
2872		error = filemap_get_pages(&iocb, len, &fbatch, true);
2873		if (error < 0)
2874			break;
2875
2876		/*
2877		 * i_size must be checked after we know the pages are Uptodate.
2878		 *
2879		 * Checking i_size after the uptodate check allows us to calculate
2880		 * the correct value for "nr", which means the zero-filled
2881		 * part of the page is not copied back to userspace (unless
2882		 * another truncate extends the file - this is desired though).
2883		 */
2884		isize = i_size_read(in->f_mapping->host);
2885		if (unlikely(*ppos >= isize))
2886			break;
2887		end_offset = min_t(loff_t, isize, *ppos + len);
2888
2889		/*
2890		 * Once we start copying data, we don't want to be touching any
2891		 * cachelines that might be contended:
2892		 */
2893		writably_mapped = mapping_writably_mapped(in->f_mapping);
2894
2895		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2896			struct folio *folio = fbatch.folios[i];
2897			size_t n;
2898
2899			if (folio_pos(folio) >= end_offset)
2900				goto out;
2901			folio_mark_accessed(folio);
2902
2903			/*
2904			 * If users can be writing to this folio using arbitrary
2905			 * virtual addresses, take care of potential aliasing
2906			 * before reading the folio on the kernel side.
2907			 */
2908			if (writably_mapped)
2909				flush_dcache_folio(folio);
2910
2911			n = min_t(loff_t, len, isize - *ppos);
2912			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2913			if (!n)
2914				goto out;
2915			len -= n;
2916			total_spliced += n;
2917			*ppos += n;
2918			in->f_ra.prev_pos = *ppos;
2919			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2920				goto out;
2921		}
2922
2923		folio_batch_release(&fbatch);
2924	} while (len);
2925
2926out:
2927	folio_batch_release(&fbatch);
2928	file_accessed(in);
2929
2930	return total_spliced ? total_spliced : error;
2931}
2932EXPORT_SYMBOL(filemap_splice_read);
2933
2934static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2935		struct address_space *mapping, struct folio *folio,
2936		loff_t start, loff_t end, bool seek_data)
2937{
2938	const struct address_space_operations *ops = mapping->a_ops;
2939	size_t offset, bsz = i_blocksize(mapping->host);
2940
2941	if (xa_is_value(folio) || folio_test_uptodate(folio))
2942		return seek_data ? start : end;
2943	if (!ops->is_partially_uptodate)
2944		return seek_data ? end : start;
2945
2946	xas_pause(xas);
2947	rcu_read_unlock();
2948	folio_lock(folio);
2949	if (unlikely(folio->mapping != mapping))
2950		goto unlock;
2951
2952	offset = offset_in_folio(folio, start) & ~(bsz - 1);
2953
2954	do {
2955		if (ops->is_partially_uptodate(folio, offset, bsz) ==
2956							seek_data)
2957			break;
2958		start = (start + bsz) & ~(bsz - 1);
2959		offset += bsz;
2960	} while (offset < folio_size(folio));
2961unlock:
2962	folio_unlock(folio);
2963	rcu_read_lock();
2964	return start;
2965}
2966
2967static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
2968{
2969	if (xa_is_value(folio))
2970		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
2971	return folio_size(folio);
2972}
2973
2974/**
2975 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
2976 * @mapping: Address space to search.
2977 * @start: First byte to consider.
2978 * @end: Limit of search (exclusive).
2979 * @whence: Either SEEK_HOLE or SEEK_DATA.
2980 *
2981 * If the page cache knows which blocks contain holes and which blocks
2982 * contain data, your filesystem can use this function to implement
2983 * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
2984 * entirely memory-based such as tmpfs, and filesystems which support
2985 * unwritten extents.
2986 *
2987 * Return: The requested offset on success, or -ENXIO if @whence specifies
2988 * SEEK_DATA and there is no data after @start.  There is an implicit hole
2989 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
2990 * and @end contain data.
2991 */
2992loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
2993		loff_t end, int whence)
2994{
2995	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
2996	pgoff_t max = (end - 1) >> PAGE_SHIFT;
2997	bool seek_data = (whence == SEEK_DATA);
2998	struct folio *folio;
2999
3000	if (end <= start)
3001		return -ENXIO;
3002
3003	rcu_read_lock();
3004	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3005		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3006		size_t seek_size;
3007
3008		if (start < pos) {
3009			if (!seek_data)
3010				goto unlock;
3011			start = pos;
3012		}
3013
3014		seek_size = seek_folio_size(&xas, folio);
3015		pos = round_up((u64)pos + 1, seek_size);
3016		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3017				seek_data);
3018		if (start < pos)
3019			goto unlock;
3020		if (start >= end)
3021			break;
3022		if (seek_size > PAGE_SIZE)
3023			xas_set(&xas, pos >> PAGE_SHIFT);
3024		if (!xa_is_value(folio))
3025			folio_put(folio);
3026	}
3027	if (seek_data)
3028		start = -ENXIO;
3029unlock:
3030	rcu_read_unlock();
3031	if (folio && !xa_is_value(folio))
3032		folio_put(folio);
3033	if (start > end)
3034		return end;
3035	return start;
3036}
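/*
 * Illustrative sketch (not part of filemap.c): a memory-backed
 * filesystem could implement SEEK_HOLE/SEEK_DATA on top of
 * mapping_seek_hole_data(), roughly as shmem does in its ->llseek.
 * The wrapper below is hypothetical and skips locking details.
 */
static loff_t example_seek_hole_data(struct file *file, loff_t offset,
				     int whence)
{
	struct inode *inode = file_inode(file);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -ENXIO;

	return mapping_seek_hole_data(file->f_mapping, offset, isize, whence);
}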
3037
3038#ifdef CONFIG_MMU
3039#define MMAP_LOTSAMISS  (100)
3040/*
3041 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
3042 * @vmf - the vm_fault for this fault.
3043 * @folio - the folio to lock.
3044 * @fpin - the pointer to the file we may pin (or is already pinned).
3045 *
3046 * This works similarly to folio_lock_or_retry() in that it can drop the
3047 * mmap_lock.  It differs in that it actually returns the folio locked
3048 * if it returns 1 and 0 if it couldn't lock the folio.  If we did have
3049 * to drop the mmap_lock then fpin will point to the pinned file and
3050 * needs to be fput()'ed at a later point.
3051 */
3052static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3053				     struct file **fpin)
3054{
3055	if (folio_trylock(folio))
3056		return 1;
3057
3058	/*
3059	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
3060	 * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
3061	 * is supposed to work. We have way too many special cases..
3062	 */
3063	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3064		return 0;
3065
3066	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3067	if (vmf->flags & FAULT_FLAG_KILLABLE) {
3068		if (__folio_lock_killable(folio)) {
3069			/*
3070			 * We didn't have the right flags to drop the
3071			 * fault lock, but all fault_handlers only check
3072			 * for fatal signals if we return VM_FAULT_RETRY,
3073			 * so we need to drop the fault lock here and
3074			 * return 0 if we don't have a fpin.
3075			 */
3076			if (*fpin == NULL)
3077				release_fault_lock(vmf);
3078			return 0;
3079		}
3080	} else
3081		__folio_lock(folio);
3082
3083	return 1;
3084}
3085
3086/*
3087 * Synchronous readahead happens when we don't even find a page in the page
3088 * cache at all.  We don't want to perform IO under the mmap_lock, so if we have
3089 * to drop the mmap_lock we return the file that was pinned in order for us to do
3090 * that.  If we didn't pin a file then we return NULL.  The file that is
3091 * returned needs to be fput()'ed when we're done with it.
3092 */
3093static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3094{
3095	struct file *file = vmf->vma->vm_file;
3096	struct file_ra_state *ra = &file->f_ra;
3097	struct address_space *mapping = file->f_mapping;
3098	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3099	struct file *fpin = NULL;
3100	unsigned long vm_flags = vmf->vma->vm_flags;
3101	unsigned int mmap_miss;
3102
3103#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3104	/* Use the readahead code, even if readahead is disabled */
3105	if (vm_flags & VM_HUGEPAGE) {
3106		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3107		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3108		ra->size = HPAGE_PMD_NR;
3109		/*
3110		 * Fetch two PMD folios, so we get the chance to actually
3111		 * readahead, unless we've been told not to.
3112		 */
3113		if (!(vm_flags & VM_RAND_READ))
3114			ra->size *= 2;
3115		ra->async_size = HPAGE_PMD_NR;
3116		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3117		return fpin;
3118	}
3119#endif
3120
3121	/* If we don't want any read-ahead, don't bother */
3122	if (vm_flags & VM_RAND_READ)
3123		return fpin;
3124	if (!ra->ra_pages)
3125		return fpin;
3126
3127	if (vm_flags & VM_SEQ_READ) {
3128		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3129		page_cache_sync_ra(&ractl, ra->ra_pages);
3130		return fpin;
3131	}
3132
3133	/* Avoid banging the cache line if not needed */
3134	mmap_miss = READ_ONCE(ra->mmap_miss);
3135	if (mmap_miss < MMAP_LOTSAMISS * 10)
3136		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3137
3138	/*
3139	 * Do we miss much more than hit in this file? If so,
3140	 * stop bothering with read-ahead. It will only hurt.
3141	 */
3142	if (mmap_miss > MMAP_LOTSAMISS)
3143		return fpin;
3144
3145	/*
3146	 * mmap read-around
3147	 */
3148	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3149	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3150	ra->size = ra->ra_pages;
3151	ra->async_size = ra->ra_pages / 4;
3152	ractl._index = ra->start;
3153	page_cache_ra_order(&ractl, ra, 0);
3154	return fpin;
3155}
3156
3157/*
3158 * Asynchronous readahead happens when we find the page with PG_readahead set,
3159 * so we want to possibly extend the readahead further.  We return the file that
3160 * was pinned if we have to drop the mmap_lock in order to do IO.
3161 */
3162static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3163					    struct folio *folio)
3164{
3165	struct file *file = vmf->vma->vm_file;
3166	struct file_ra_state *ra = &file->f_ra;
3167	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3168	struct file *fpin = NULL;
3169	unsigned int mmap_miss;
3170
3171	/* If we don't want any read-ahead, don't bother */
3172	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3173		return fpin;
3174
3175	mmap_miss = READ_ONCE(ra->mmap_miss);
3176	if (mmap_miss)
3177		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3178
3179	if (folio_test_readahead(folio)) {
3180		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3181		page_cache_async_ra(&ractl, folio, ra->ra_pages);
3182	}
3183	return fpin;
3184}
3185
3186/**
3187 * filemap_fault - read in file data for page fault handling
3188 * @vmf:	struct vm_fault containing details of the fault
3189 *
3190 * filemap_fault() is invoked via the vma operations vector for a
3191 * mapped memory region to read in file data during a page fault.
3192 *
3193 * The goto's are kind of ugly, but this streamlines the normal case of having
3194 * it in the page cache, and handles the special cases reasonably without
3195 * having a lot of duplicated code.
3196 *
3197 * vma->vm_mm->mmap_lock must be held on entry.
3198 *
3199 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3200 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3201 *
3202 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3203 * has not been released.
3204 *
3205 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3206 *
3207 * Return: bitwise-OR of %VM_FAULT_ codes.
3208 */
3209vm_fault_t filemap_fault(struct vm_fault *vmf)
3210{
3211	int error;
3212	struct file *file = vmf->vma->vm_file;
3213	struct file *fpin = NULL;
3214	struct address_space *mapping = file->f_mapping;
3215	struct inode *inode = mapping->host;
3216	pgoff_t max_idx, index = vmf->pgoff;
3217	struct folio *folio;
3218	vm_fault_t ret = 0;
3219	bool mapping_locked = false;
3220
3221	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3222	if (unlikely(index >= max_idx))
3223		return VM_FAULT_SIGBUS;
3224
3225	/*
3226	 * Do we have something in the page cache already?
3227	 */
3228	folio = filemap_get_folio(mapping, index);
3229	if (likely(!IS_ERR(folio))) {
3230		/*
3231		 * We found the page, so try async readahead before waiting for
3232		 * the lock.
3233		 */
3234		if (!(vmf->flags & FAULT_FLAG_TRIED))
3235			fpin = do_async_mmap_readahead(vmf, folio);
3236		if (unlikely(!folio_test_uptodate(folio))) {
3237			filemap_invalidate_lock_shared(mapping);
3238			mapping_locked = true;
3239		}
3240	} else {
3241		/* No page in the page cache at all */
3242		count_vm_event(PGMAJFAULT);
3243		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3244		ret = VM_FAULT_MAJOR;
3245		fpin = do_sync_mmap_readahead(vmf);
3246retry_find:
3247		/*
3248		 * See comment in filemap_create_folio() why we need
3249		 * invalidate_lock
3250		 */
3251		if (!mapping_locked) {
3252			filemap_invalidate_lock_shared(mapping);
3253			mapping_locked = true;
3254		}
3255		folio = __filemap_get_folio(mapping, index,
3256					  FGP_CREAT|FGP_FOR_MMAP,
3257					  vmf->gfp_mask);
3258		if (IS_ERR(folio)) {
3259			if (fpin)
3260				goto out_retry;
3261			filemap_invalidate_unlock_shared(mapping);
3262			return VM_FAULT_OOM;
3263		}
3264	}
3265
3266	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3267		goto out_retry;
3268
3269	/* Did it get truncated? */
3270	if (unlikely(folio->mapping != mapping)) {
3271		folio_unlock(folio);
3272		folio_put(folio);
3273		goto retry_find;
3274	}
3275	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3276
3277	/*
3278	 * We have a locked folio in the page cache, now we need to check
3279	 * that it's up-to-date. If not, it is going to be due to an error,
3280	 * or because readahead was otherwise unable to retrieve it.
3281	 */
3282	if (unlikely(!folio_test_uptodate(folio))) {
3283		/*
3284		 * If the invalidate lock is not held, the folio was in cache
3285		 * and uptodate and now it is not. Strange but possible since we
3286		 * didn't hold the page lock all the time. Let's drop
3287		 * everything, get the invalidate lock and try again.
3288		 */
3289		if (!mapping_locked) {
3290			folio_unlock(folio);
3291			folio_put(folio);
3292			goto retry_find;
3293		}
3294
3295		/*
3296		 * OK, the folio is really not uptodate. This can be because the
3297		 * VMA has the VM_RAND_READ flag set, or because an error
3298		 * arose. Let's read it in directly.
3299		 */
3300		goto page_not_uptodate;
3301	}
3302
3303	/*
3304	 * We've made it this far and we had to drop our mmap_lock, now is the
3305	 * time to return to the upper layer and have it re-find the vma and
3306	 * redo the fault.
3307	 */
3308	if (fpin) {
3309		folio_unlock(folio);
3310		goto out_retry;
3311	}
3312	if (mapping_locked)
3313		filemap_invalidate_unlock_shared(mapping);
3314
3315	/*
3316	 * Found the page and have a reference on it.
3317	 * We must recheck i_size under page lock.
3318	 */
3319	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3320	if (unlikely(index >= max_idx)) {
3321		folio_unlock(folio);
3322		folio_put(folio);
3323		return VM_FAULT_SIGBUS;
3324	}
3325
3326	vmf->page = folio_file_page(folio, index);
3327	return ret | VM_FAULT_LOCKED;
3328
3329page_not_uptodate:
3330	/*
3331	 * Umm, take care of errors if the page isn't up-to-date.
3332	 * Try to re-read it _once_. We do this synchronously,
3333	 * because there really aren't any performance issues here
3334	 * and we need to check for errors.
3335	 */
3336	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3337	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3338	if (fpin)
3339		goto out_retry;
3340	folio_put(folio);
3341
3342	if (!error || error == AOP_TRUNCATED_PAGE)
3343		goto retry_find;
3344	filemap_invalidate_unlock_shared(mapping);
3345
3346	return VM_FAULT_SIGBUS;
3347
3348out_retry:
3349	/*
3350	 * We dropped the mmap_lock, we need to return to the fault handler to
3351	 * re-find the vma and come back and find our hopefully still populated
3352	 * page.
3353	 */
3354	if (!IS_ERR(folio))
3355		folio_put(folio);
3356	if (mapping_locked)
3357		filemap_invalidate_unlock_shared(mapping);
3358	if (fpin)
3359		fput(fpin);
3360	return ret | VM_FAULT_RETRY;
3361}
3362EXPORT_SYMBOL(filemap_fault);
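/*
 * Illustrative sketch (not part of filemap.c): filemap_fault() is meant
 * to be plugged into a filesystem's vm_operations_struct together with
 * filemap_map_pages() and filemap_page_mkwrite(), as generic_file_vm_ops
 * does.  The structure below is a hypothetical example.
 */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};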
3363
3364static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3365		pgoff_t start)
3366{
3367	struct mm_struct *mm = vmf->vma->vm_mm;
3368
3369	/* Huge page is mapped? No need to proceed. */
3370	if (pmd_trans_huge(*vmf->pmd)) {
3371		folio_unlock(folio);
3372		folio_put(folio);
3373		return true;
3374	}
3375
3376	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3377		struct page *page = folio_file_page(folio, start);
3378		vm_fault_t ret = do_set_pmd(vmf, page);
3379		if (!ret) {
3380			/* The page is mapped successfully, reference consumed. */
3381			folio_unlock(folio);
3382			return true;
3383		}
3384	}
3385
3386	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
3387		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3388
3389	return false;
3390}
3391
3392static struct folio *next_uptodate_folio(struct xa_state *xas,
3393		struct address_space *mapping, pgoff_t end_pgoff)
3394{
3395	struct folio *folio = xas_next_entry(xas, end_pgoff);
3396	unsigned long max_idx;
3397
3398	do {
3399		if (!folio)
3400			return NULL;
3401		if (xas_retry(xas, folio))
3402			continue;
3403		if (xa_is_value(folio))
3404			continue;
3405		if (folio_test_locked(folio))
3406			continue;
3407		if (!folio_try_get_rcu(folio))
3408			continue;
3409		/* Has the page moved or been split? */
3410		if (unlikely(folio != xas_reload(xas)))
3411			goto skip;
3412		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3413			goto skip;
3414		if (!folio_trylock(folio))
3415			goto skip;
3416		if (folio->mapping != mapping)
3417			goto unlock;
3418		if (!folio_test_uptodate(folio))
3419			goto unlock;
3420		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3421		if (xas->xa_index >= max_idx)
3422			goto unlock;
3423		return folio;
3424unlock:
3425		folio_unlock(folio);
3426skip:
3427		folio_put(folio);
3428	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3429
3430	return NULL;
3431}
3432
3433/*
3434 * Map page range [start_page, start_page + nr_pages) of folio.
3435 * start_page is obtained from start by folio_page(folio, start)
3436 */
3437static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
3438			struct folio *folio, unsigned long start,
3439			unsigned long addr, unsigned int nr_pages,
3440			unsigned int *mmap_miss)
3441{
3442	vm_fault_t ret = 0;
3443	struct page *page = folio_page(folio, start);
3444	unsigned int count = 0;
3445	pte_t *old_ptep = vmf->pte;
3446
3447	do {
3448		if (PageHWPoison(page + count))
3449			goto skip;
3450
3451		(*mmap_miss)++;
3452
3453		/*
3454		 * NOTE: If there're PTE markers, we'll leave them to be
3455		 * handled in the specific fault path, and it'll prohibit the
3456		 * fault-around logic.
3457		 */
3458		if (!pte_none(ptep_get(&vmf->pte[count])))
3459			goto skip;
3460
3461		count++;
3462		continue;
3463skip:
3464		if (count) {
3465			set_pte_range(vmf, folio, page, count, addr);
3466			folio_ref_add(folio, count);
3467			if (in_range(vmf->address, addr, count * PAGE_SIZE))
3468				ret = VM_FAULT_NOPAGE;
3469		}
3470
3471		count++;
3472		page += count;
3473		vmf->pte += count;
3474		addr += count * PAGE_SIZE;
3475		count = 0;
3476	} while (--nr_pages > 0);
3477
3478	if (count) {
3479		set_pte_range(vmf, folio, page, count, addr);
3480		folio_ref_add(folio, count);
3481		if (in_range(vmf->address, addr, count * PAGE_SIZE))
3482			ret = VM_FAULT_NOPAGE;
3483	}
3484
3485	vmf->pte = old_ptep;
3486
3487	return ret;
3488}
3489
3490static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
3491		struct folio *folio, unsigned long addr,
3492		unsigned int *mmap_miss)
3493{
3494	vm_fault_t ret = 0;
3495	struct page *page = &folio->page;
3496
3497	if (PageHWPoison(page))
3498		return ret;
3499
3500	(*mmap_miss)++;
3501
3502	/*
3503	 * NOTE: If there're PTE markers, we'll leave them to be
3504	 * handled in the specific fault path, and it'll prohibit
3505	 * the fault-around logic.
3506	 */
3507	if (!pte_none(ptep_get(vmf->pte)))
3508		return ret;
3509
3510	if (vmf->address == addr)
3511		ret = VM_FAULT_NOPAGE;
3512
3513	set_pte_range(vmf, folio, page, 1, addr);
3514	folio_ref_inc(folio);
3515
3516	return ret;
3517}
3518
3519vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3520			     pgoff_t start_pgoff, pgoff_t end_pgoff)
3521{
3522	struct vm_area_struct *vma = vmf->vma;
3523	struct file *file = vma->vm_file;
3524	struct address_space *mapping = file->f_mapping;
3525	pgoff_t last_pgoff = start_pgoff;
3526	unsigned long addr;
3527	XA_STATE(xas, &mapping->i_pages, start_pgoff);
3528	struct folio *folio;
3529	vm_fault_t ret = 0;
3530	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
3531
3532	rcu_read_lock();
3533	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
3534	if (!folio)
3535		goto out;
3536
3537	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3538		ret = VM_FAULT_NOPAGE;
3539		goto out;
3540	}
3541
3542	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3543	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3544	if (!vmf->pte) {
3545		folio_unlock(folio);
3546		folio_put(folio);
3547		goto out;
3548	}
3549	do {
3550		unsigned long end;
3551
3552		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3553		vmf->pte += xas.xa_index - last_pgoff;
3554		last_pgoff = xas.xa_index;
3555		end = folio_next_index(folio) - 1;
3556		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
3557
3558		if (!folio_test_large(folio))
3559			ret |= filemap_map_order0_folio(vmf,
3560					folio, addr, &mmap_miss);
3561		else
3562			ret |= filemap_map_folio_range(vmf, folio,
3563					xas.xa_index - folio->index, addr,
3564					nr_pages, &mmap_miss);
3565
3566		folio_unlock(folio);
3567		folio_put(folio);
3568	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
3569	pte_unmap_unlock(vmf->pte, vmf->ptl);
3570out:
3571	rcu_read_unlock();
3572
3573	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
3574	if (mmap_miss >= mmap_miss_saved)
3575		WRITE_ONCE(file->f_ra.mmap_miss, 0);
3576	else
3577		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
3578
3579	return ret;
3580}
3581EXPORT_SYMBOL(filemap_map_pages);
3582
3583vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3584{
3585	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3586	struct folio *folio = page_folio(vmf->page);
3587	vm_fault_t ret = VM_FAULT_LOCKED;
3588
3589	sb_start_pagefault(mapping->host->i_sb);
3590	file_update_time(vmf->vma->vm_file);
3591	folio_lock(folio);
3592	if (folio->mapping != mapping) {
3593		folio_unlock(folio);
3594		ret = VM_FAULT_NOPAGE;
3595		goto out;
3596	}
3597	/*
3598	 * We mark the folio dirty already here so that when freeze is in
3599	 * progress, we are guaranteed that writeback during freezing will
3600	 * see the dirty folio and writeprotect it again.
3601	 */
3602	folio_mark_dirty(folio);
3603	folio_wait_stable(folio);
3604out:
3605	sb_end_pagefault(mapping->host->i_sb);
3606	return ret;
3607}
 
3608
3609const struct vm_operations_struct generic_file_vm_ops = {
3610	.fault		= filemap_fault,
3611	.map_pages	= filemap_map_pages,
3612	.page_mkwrite	= filemap_page_mkwrite,
3613};
3614
3615/* This is used for a general mmap of a disk file */
3616
3617int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3618{
3619	struct address_space *mapping = file->f_mapping;
3620
3621	if (!mapping->a_ops->read_folio)
3622		return -ENOEXEC;
3623	file_accessed(file);
3624	vma->vm_ops = &generic_file_vm_ops;
3625	return 0;
3626}
3627
3628/*
3629 * This is for filesystems which do not implement ->writepage.
3630 */
3631int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3632{
3633	if (vma_is_shared_maywrite(vma))
3634		return -EINVAL;
3635	return generic_file_mmap(file, vma);
3636}
3637#else
3638vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3639{
3640	return VM_FAULT_SIGBUS;
3641}
3642int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3643{
3644	return -ENOSYS;
3645}
3646int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3647{
3648	return -ENOSYS;
3649}
3650#endif /* CONFIG_MMU */
3651
3652EXPORT_SYMBOL(filemap_page_mkwrite);
3653EXPORT_SYMBOL(generic_file_mmap);
3654EXPORT_SYMBOL(generic_file_readonly_mmap);
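/*
 * Example (illustration only; "examplefs" is a hypothetical name, not an
 * in-tree filesystem): a filesystem without ->writepage support would
 * typically hook the read-only variant into its file_operations, so that
 * shared writable mappings are refused with -EINVAL.
 */
static const struct file_operations examplefs_ro_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= filemap_splice_read,
};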
3655
3656static struct folio *do_read_cache_folio(struct address_space *mapping,
3657		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3658{
3659	struct folio *folio;
3660	int err;
3661
3662	if (!filler)
3663		filler = mapping->a_ops->read_folio;
3664repeat:
3665	folio = filemap_get_folio(mapping, index);
3666	if (IS_ERR(folio)) {
3667		folio = filemap_alloc_folio(gfp, 0);
3668		if (!folio)
3669			return ERR_PTR(-ENOMEM);
3670		err = filemap_add_folio(mapping, folio, index, gfp);
3671		if (unlikely(err)) {
3672			folio_put(folio);
3673			if (err == -EEXIST)
3674				goto repeat;
3675			/* Presumably ENOMEM for xarray node */
3676			return ERR_PTR(err);
3677		}
3678
3679		goto filler;
3680	}
3681	if (folio_test_uptodate(folio))
3682		goto out;
3683
3684	if (!folio_trylock(folio)) {
3685		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3686		goto repeat;
3687	}
3688
3689	/* Folio was truncated from mapping */
3690	if (!folio->mapping) {
3691		folio_unlock(folio);
3692		folio_put(folio);
3693		goto repeat;
3694	}
3695
3696	/* Someone else locked and filled the page in a very small window */
3697	if (folio_test_uptodate(folio)) {
3698		folio_unlock(folio);
3699		goto out;
3700	}
3701
3702filler:
3703	err = filemap_read_folio(file, filler, folio);
3704	if (err) {
3705		folio_put(folio);
3706		if (err == AOP_TRUNCATED_PAGE)
3707			goto repeat;
3708		return ERR_PTR(err);
3709	}
3710
3711out:
3712	folio_mark_accessed(folio);
3713	return folio;
3714}
3715
3716/**
3717 * read_cache_folio - Read into page cache, fill it if needed.
3718 * @mapping: The address_space to read from.
3719 * @index: The index to read.
3720 * @filler: Function to perform the read, or NULL to use aops->read_folio().
3721 * @file: Passed to filler function, may be NULL if not required.
3722 *
3723 * Read one page into the page cache.  If it succeeds, the folio returned
3724 * will contain @index, but it may not be the first page of the folio.
3725 *
3726 * If the filler function returns an error, it will be returned to the
3727 * caller.
3728 *
3729 * Context: May sleep.  Expects mapping->invalidate_lock to be held.
3730 * Return: An uptodate folio on success, ERR_PTR() on failure.
3731 */
3732struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3733		filler_t filler, struct file *file)
3734{
3735	return do_read_cache_folio(mapping, index, filler, file,
3736			mapping_gfp_mask(mapping));
3737}
3738EXPORT_SYMBOL(read_cache_folio);
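/*
 * Example (illustration only; example_read_block() is a hypothetical
 * helper, not a kernel API, and assumes <linux/highmem.h> for
 * kmap_local_folio()): a typical caller lets read_cache_folio() bring the
 * data uptodate, maps the page of interest, and drops the folio reference
 * when done.  As noted above, the caller is expected to hold
 * mapping->invalidate_lock or otherwise keep the range from being
 * truncated underneath it.
 */
static int example_read_block(struct address_space *mapping, pgoff_t index,
			      void *dst, size_t len)
{
	loff_t pos = (loff_t)index << PAGE_SHIFT;
	struct folio *folio;
	void *kaddr;

	if (len > PAGE_SIZE)
		return -EINVAL;

	folio = read_cache_folio(mapping, index, NULL, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio is uptodate and unlocked; it may span several pages. */
	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
	memcpy(dst, kaddr, len);
	kunmap_local(kaddr);

	folio_put(folio);
	return 0;
}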
3739
3740/**
3741 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3742 * @mapping:	The address_space for the folio.
3743 * @index:	The index that the allocated folio will contain.
3744 * @gfp:	The page allocator flags to use if allocating.
3745 *
3746 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
3747 * any new memory allocations done using the specified allocation flags.
3748 *
3749 * The most likely error from this function is EIO, but ENOMEM is
3750 * possible and so is EINTR.  If ->read_folio returns another error,
3751 * that will be returned to the caller.
3752 *
3753 * The function expects mapping->invalidate_lock to be already held.
3754 *
3755 * Return: Uptodate folio on success, ERR_PTR() on failure.
3756 */
3757struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3758		pgoff_t index, gfp_t gfp)
3759{
3760	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
3761}
3762EXPORT_SYMBOL(mapping_read_folio_gfp);
3763
3764static struct page *do_read_cache_page(struct address_space *mapping,
3765		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3766{
3767	struct folio *folio;
3768
3769	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3770	if (IS_ERR(folio))
3771		return &folio->page;
3772	return folio_file_page(folio, index);
3773}
3774
3775struct page *read_cache_page(struct address_space *mapping,
3776			pgoff_t index, filler_t *filler, struct file *file)
3777{
3778	return do_read_cache_page(mapping, index, filler, file,
3779			mapping_gfp_mask(mapping));
3780}
3781EXPORT_SYMBOL(read_cache_page);
3782
3783/**
3784 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3785 * @mapping:	the page's address_space
3786 * @index:	the page index
3787 * @gfp:	the page allocator flags to use if allocating
3788 *
3789 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3790 * any new page allocations done using the specified allocation flags.
3791 *
3792 * If the page does not get brought uptodate, return -EIO.
3793 *
3794 * The function expects mapping->invalidate_lock to be already held.
3795 *
3796 * Return: up to date page on success, ERR_PTR() on failure.
3797 */
3798struct page *read_cache_page_gfp(struct address_space *mapping,
3799				pgoff_t index,
3800				gfp_t gfp)
3801{
3802	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3803}
3804EXPORT_SYMBOL(read_cache_page_gfp);
3805
3806/*
3807 * Warn about a page cache invalidation failure during a direct I/O write.
3808 */
3809static void dio_warn_stale_pagecache(struct file *filp)
3810{
3811	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3812	char pathname[128];
3813	char *path;
3814
3815	errseq_set(&filp->f_mapping->wb_err, -EIO);
3816	if (__ratelimit(&_rs)) {
3817		path = file_path(filp, pathname, sizeof(pathname));
3818		if (IS_ERR(path))
3819			path = "(unknown)";
3820		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3821		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3822			current->comm);
3823	}
3824}
 
3825
3826void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
3827{
3828	struct address_space *mapping = iocb->ki_filp->f_mapping;
3829
3830	if (mapping->nrpages &&
3831	    invalidate_inode_pages2_range(mapping,
3832			iocb->ki_pos >> PAGE_SHIFT,
3833			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
3834		dio_warn_stale_pagecache(iocb->ki_filp);
3835}
3836
3837ssize_t
3838generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3839{
3840	struct address_space *mapping = iocb->ki_filp->f_mapping;
3841	size_t write_len = iov_iter_count(from);
3842	ssize_t written;
3843
3844	/*
3845	 * If a page cannot be invalidated, return 0 to fall back
3846	 * to buffered write.
3847	 */
3848	written = kiocb_invalidate_pages(iocb, write_len);
3849	if (written) {
3850		if (written == -EBUSY)
3851			return 0;
3852		return written;
3853	}
3854
3855	written = mapping->a_ops->direct_IO(iocb, from);
 
3856
3857	/*
3858	 * Finally, try again to invalidate clean pages which might have been
3859	 * cached by non-direct readahead, or faulted in by get_user_pages()
3860	 * if the source of the write was an mmap'ed region of the file
3861	 * we're writing.  Either one is a pretty crazy thing to do,
3862	 * so we don't support it 100%.  If this invalidation
3863	 * fails, tough, the write still worked...
3864	 *
3865	 * Most of the time we do not need this since dio_complete() will do
3866	 * the invalidation for us. However there are some file systems that
3867	 * do not end up with dio_complete() being called, so let's not break
3868	 * them by removing it completely.
3869	 *
3870	 * A notable example is blkdev_direct_IO().
3871	 *
3872	 * Skip invalidation for async writes or if mapping has no pages.
3873	 */
3874	if (written > 0) {
3875		struct inode *inode = mapping->host;
3876		loff_t pos = iocb->ki_pos;
 
3877
3878		kiocb_invalidate_post_direct_write(iocb, written);
3879		pos += written;
3880		write_len -= written;
3881		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3882			i_size_write(inode, pos);
3883			mark_inode_dirty(inode);
3884		}
3885		iocb->ki_pos = pos;
3886	}
3887	if (written != -EIOCBQUEUED)
3888		iov_iter_revert(from, write_len - iov_iter_count(from));
3889	return written;
3890}
3891EXPORT_SYMBOL(generic_file_direct_write);
3892
3893ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3894{
3895	struct file *file = iocb->ki_filp;
3896	loff_t pos = iocb->ki_pos;
3897	struct address_space *mapping = file->f_mapping;
3898	const struct address_space_operations *a_ops = mapping->a_ops;
3899	long status = 0;
3900	ssize_t written = 0;
3901
3902	do {
3903		struct page *page;
3904		unsigned long offset;	/* Offset into pagecache page */
3905		unsigned long bytes;	/* Bytes to write to page */
3906		size_t copied;		/* Bytes copied from user */
3907		void *fsdata = NULL;
3908
3909		offset = (pos & (PAGE_SIZE - 1));
3910		bytes = min_t(unsigned long, PAGE_SIZE - offset,
3911						iov_iter_count(i));
3912
3913again:
3914		/*
3915		 * Bring in the user page that we will copy from _first_.
3916		 * Otherwise there's a nasty deadlock on copying from the
3917		 * same page as we're writing to, without it being marked
3918		 * up-to-date.
3919		 */
3920		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3921			status = -EFAULT;
3922			break;
3923		}
3924
3925		if (fatal_signal_pending(current)) {
3926			status = -EINTR;
3927			break;
3928		}
3929
3930		status = a_ops->write_begin(file, mapping, pos, bytes,
3931						&page, &fsdata);
3932		if (unlikely(status < 0))
3933			break;
3934
3935		if (mapping_writably_mapped(mapping))
3936			flush_dcache_page(page);
3937
3938		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3939		flush_dcache_page(page);
3940
3941		status = a_ops->write_end(file, mapping, pos, bytes, copied,
3942						page, fsdata);
3943		if (unlikely(status != copied)) {
3944			iov_iter_revert(i, copied - max(status, 0L));
3945			if (unlikely(status < 0))
3946				break;
3947		}
3948		cond_resched();
3949
3950		if (unlikely(status == 0)) {
 
3951			/*
3952			 * A short copy made ->write_end() reject the
3953			 * thing entirely.  Might be memory poisoning
3954			 * halfway through, might be a race with munmap,
3955			 * might be severe memory pressure.
3956			 */
3957			if (copied)
3958				bytes = copied;
3959			goto again;
3960		}
3961		pos += status;
3962		written += status;
3963
3964		balance_dirty_pages_ratelimited(mapping);
3965	} while (iov_iter_count(i));
3966
3967	if (!written)
3968		return status;
3969	iocb->ki_pos += written;
3970	return written;
3971}
3972EXPORT_SYMBOL(generic_perform_write);
3973
3974/**
3975 * __generic_file_write_iter - write data to a file
3976 * @iocb:	IO state structure (file, offset, etc.)
3977 * @from:	iov_iter with data to write
3978 *
3979 * This function does all the work needed for actually writing data to a
3980 * file. It does all basic checks, removes SUID from the file, updates
3981 * modification times and calls proper subroutines depending on whether we
3982 * do direct IO or a standard buffered write.
3983 *
3984 * It expects i_rwsem to be grabbed unless we work on a block device or similar
3985 * object which does not need locking at all.
3986 *
3987 * This function does *not* take care of syncing data in case of O_SYNC write.
3988 * A caller has to handle it. This is mainly due to the fact that we want to
3989 * avoid syncing under i_rwsem.
3990 *
3991 * Return:
3992 * * number of bytes written, even for truncated writes
3993 * * negative error code if no data has been written at all
3994 */
3995ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3996{
3997	struct file *file = iocb->ki_filp;
3998	struct address_space *mapping = file->f_mapping;
3999	struct inode *inode = mapping->host;
4000	ssize_t ret;
4001
4002	ret = file_remove_privs(file);
4003	if (ret)
4004		return ret;
4005
4006	ret = file_update_time(file);
4007	if (ret)
4008		return ret;
4009
4010	if (iocb->ki_flags & IOCB_DIRECT) {
4011		ret = generic_file_direct_write(iocb, from);
4012		/*
4013		 * If the write stopped short of completing, fall back to
4014		 * buffered writes.  Some filesystems do this for writes to
4015		 * holes, for example.  For DAX files, a buffered write will
4016		 * not succeed (even if it did, DAX does not handle dirty
4017		 * page-cache pages correctly).
4018		 */
4019		if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
4020			return ret;
4021		return direct_write_fallback(iocb, from, ret,
4022				generic_perform_write(iocb, from));
4023	}
4024
4025	return generic_perform_write(iocb, from);
4026}
4027EXPORT_SYMBOL(__generic_file_write_iter);
4028
4029/**
4030 * generic_file_write_iter - write data to a file
4031 * @iocb:	IO state structure
4032 * @from:	iov_iter with data to write
4033 *
4034 * This is a wrapper around __generic_file_write_iter() to be used by most
4035 * filesystems. It takes care of syncing the file in case of an O_SYNC write
4036 * and acquires i_rwsem as needed.
4037 * Return:
4038 * * negative error code if no data has been written at all or
4039 *   vfs_fsync_range() failed for a synchronous write
4040 * * number of bytes written, even for truncated writes
4041 */
4042ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4043{
4044	struct file *file = iocb->ki_filp;
4045	struct inode *inode = file->f_mapping->host;
4046	ssize_t ret;
4047
4048	inode_lock(inode);
4049	ret = generic_write_checks(iocb, from);
4050	if (ret > 0)
4051		ret = __generic_file_write_iter(iocb, from);
4052	inode_unlock(inode);
4053
4054	if (ret > 0)
4055		ret = generic_write_sync(iocb, ret);
4056	return ret;
4057}
4058EXPORT_SYMBOL(generic_file_write_iter);
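/*
 * Example (illustration only; "examplefs" is a hypothetical name): most
 * filesystems that rely on the generic page-cache paths simply point
 * their file_operations at the helpers in this file, together with the
 * generic llseek/fsync/splice helpers from elsewhere in the VFS.
 */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
};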
4059
4060/**
4061 * filemap_release_folio() - Release fs-specific metadata on a folio.
4062 * @folio: The folio which the kernel is trying to free.
4063 * @gfp: Memory allocation flags (and I/O mode).
4064 *
4065 * The address_space is trying to release any data attached to a folio
4066 * (presumably at folio->private).
4067 *
4068 * This will also be called if the private_2 flag is set on a page,
4069 * indicating that the folio has other metadata associated with it.
4070 *
4071 * The @gfp argument specifies whether I/O may be performed to release
4072 * this page (__GFP_IO), and whether the call may block
4073 * (__GFP_RECLAIM & __GFP_FS).
4074 *
4075 * Return: %true if the release was successful, otherwise %false.
4076 */
4077bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4078{
4079	struct address_space * const mapping = folio->mapping;
4080
4081	BUG_ON(!folio_test_locked(folio));
4082	if (!folio_needs_release(folio))
4083		return true;
4084	if (folio_test_writeback(folio))
4085		return false;
4086
4087	if (mapping && mapping->a_ops->release_folio)
4088		return mapping->a_ops->release_folio(folio, gfp);
4089	return try_to_free_buffers(folio);
4090}
4091EXPORT_SYMBOL(filemap_release_folio);
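/*
 * Example (illustration only; examplefs_release_folio() is hypothetical
 * and assumes the private data was allocated with kmalloc()): a
 * filesystem that attaches state at folio->private supplies the
 * ->release_folio method that filemap_release_folio() calls, detaching
 * its metadata so the folio can be freed.
 */
static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
{
	/* Nothing attached, nothing to release. */
	if (!folio_get_private(folio))
		return true;

	/* Detach the private state (clears PG_private) and free it. */
	kfree(folio_detach_private(folio));
	return true;
}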
4092
4093#ifdef CONFIG_CACHESTAT_SYSCALL
4094/**
4095 * filemap_cachestat() - compute the page cache statistics of a mapping
4096 * @mapping:	The mapping to compute the statistics for.
4097 * @first_index:	The starting page cache index.
4098 * @last_index:	The final page index (inclusive).
4099 * @cs:	the cachestat struct to write the result to.
4100 *
4101 * This will query the page cache statistics of a mapping in the
4102 * page range of [first_index, last_index] (inclusive). The statistics
4103 * queried include: number of dirty pages, number of pages marked for
4104 * writeback, and the number of (recently) evicted pages.
4105 */
4106static void filemap_cachestat(struct address_space *mapping,
4107		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
4108{
4109	XA_STATE(xas, &mapping->i_pages, first_index);
4110	struct folio *folio;
4111
4112	rcu_read_lock();
4113	xas_for_each(&xas, folio, last_index) {
4114		int order;
4115		unsigned long nr_pages;
4116		pgoff_t folio_first_index, folio_last_index;
4117
4118		/*
4119		 * Don't deref the folio. It is not pinned, and might
4120		 * get freed (and reused) underneath us.
4121		 *
4122		 * We *could* pin it, but that would be expensive for
4123		 * what should be a fast and lightweight syscall.
4124		 *
4125		 * Instead, derive all information of interest from
4126		 * the rcu-protected xarray.
4127		 */
4128
4129		if (xas_retry(&xas, folio))
4130			continue;
4131
4132		order = xa_get_order(xas.xa, xas.xa_index);
4133		nr_pages = 1 << order;
4134		folio_first_index = round_down(xas.xa_index, 1 << order);
4135		folio_last_index = folio_first_index + nr_pages - 1;
4136
4137		/* Folios might straddle the range boundaries, only count covered pages */
4138		if (folio_first_index < first_index)
4139			nr_pages -= first_index - folio_first_index;
4140
4141		if (folio_last_index > last_index)
4142			nr_pages -= folio_last_index - last_index;
4143
4144		if (xa_is_value(folio)) {
4145			/* page is evicted */
4146			void *shadow = (void *)folio;
4147			bool workingset; /* not used */
4148
4149			cs->nr_evicted += nr_pages;
4150
4151#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
4152			if (shmem_mapping(mapping)) {
4153				/* shmem file - in swap cache */
4154				swp_entry_t swp = radix_to_swp_entry(folio);
4155
4156				shadow = get_shadow_from_swap_cache(swp);
4157			}
4158#endif
4159			if (workingset_test_recent(shadow, true, &workingset))
4160				cs->nr_recently_evicted += nr_pages;
4161
4162			goto resched;
4163		}
4164
4165		/* page is in cache */
4166		cs->nr_cache += nr_pages;
4167
4168		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
4169			cs->nr_dirty += nr_pages;
4170
4171		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
4172			cs->nr_writeback += nr_pages;
4173
4174resched:
4175		if (need_resched()) {
4176			xas_pause(&xas);
4177			cond_resched_rcu();
4178		}
4179	}
4180	rcu_read_unlock();
4181}
4182
4183/*
4184 * The cachestat(2) system call.
4185 *
4186 * cachestat() returns the page cache statistics of a file in the
4187 * bytes range specified by `off` and `len`: number of cached pages,
4188 * number of dirty pages, number of pages marked for writeback,
4189 * number of evicted pages, and number of recently evicted pages.
4190 *
4191 * An evicted page is a page that was previously in the page cache
4192 * but has been evicted since. A page is recently evicted if its last
4193 * eviction was recent enough that its reentry to the cache would
4194 * indicate that it is actively being used by the system, and that
4195 * there is memory pressure on the system.
4196 *
4197 * `off` and `len` must be non-negative integers. If `len` > 0,
4198 * the queried range is [`off`, `off` + `len`]. If `len` == 0,
4199 * we will query in the range from `off` to the end of the file.
4200 *
4201 * The `flags` argument is unused for now, but is included for future
4202 * extensibility. Users should pass 0 (i.e. no flags specified).
4203 *
4204 * Currently, hugetlbfs is not supported.
4205 *
4206 * Because the status of a page can change after cachestat() checks it
4207 * but before it returns to the application, the returned values may
4208 * contain stale information.
4209 *
4210 * return values:
4211 *  zero        - success
4212 *  -EFAULT     - cstat or cstat_range points to an illegal address
4213 *  -EINVAL     - invalid flags
4214 *  -EBADF      - invalid file descriptor
4215 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
4216 */
4217SYSCALL_DEFINE4(cachestat, unsigned int, fd,
4218		struct cachestat_range __user *, cstat_range,
4219		struct cachestat __user *, cstat, unsigned int, flags)
4220{
4221	struct fd f = fdget(fd);
4222	struct address_space *mapping;
4223	struct cachestat_range csr;
4224	struct cachestat cs;
4225	pgoff_t first_index, last_index;
4226
4227	if (!f.file)
4228		return -EBADF;
4229
4230	if (copy_from_user(&csr, cstat_range,
4231			sizeof(struct cachestat_range))) {
4232		fdput(f);
4233		return -EFAULT;
4234	}
4235
4236	/* hugetlbfs is not supported */
4237	if (is_file_hugepages(f.file)) {
4238		fdput(f);
4239		return -EOPNOTSUPP;
4240	}
4241
4242	if (flags != 0) {
4243		fdput(f);
4244		return -EINVAL;
4245	}
4246
4247	first_index = csr.off >> PAGE_SHIFT;
4248	last_index =
4249		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
4250	memset(&cs, 0, sizeof(struct cachestat));
4251	mapping = f.file->f_mapping;
4252	filemap_cachestat(mapping, first_index, last_index, &cs);
4253	fdput(f);
4254
4255	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
4256		return -EFAULT;
4257
4258	return 0;
4259}
4260#endif /* CONFIG_CACHESTAT_SYSCALL */
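/*
 * Example (illustration only, userspace): querying cachestat(2) through
 * the raw syscall number.  Assumes kernel/libc headers new enough to
 * provide __NR_cachestat and the uapi struct cachestat /
 * struct cachestat_range definitions from <linux/mman.h>.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* whole file */
	struct cachestat cs;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0))
		return 1;

	printf("cached %llu dirty %llu writeback %llu evicted %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted);
	close(fd);
	return 0;
}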
v4.10.11
 
   1/*
   2 *	linux/mm/filemap.c
   3 *
   4 * Copyright (C) 1994-1999  Linus Torvalds
   5 */
   6
   7/*
   8 * This file handles the generic file mmap semantics used by
   9 * most "normal" filesystems (but you don't /have/ to use this:
  10 * the NFS filesystem used to do this differently, for example)
  11 */
  12#include <linux/export.h>
  13#include <linux/compiler.h>
  14#include <linux/dax.h>
  15#include <linux/fs.h>
 
  16#include <linux/uaccess.h>
  17#include <linux/capability.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/gfp.h>
  20#include <linux/mm.h>
  21#include <linux/swap.h>
 
 
  22#include <linux/mman.h>
  23#include <linux/pagemap.h>
  24#include <linux/file.h>
  25#include <linux/uio.h>
 
  26#include <linux/hash.h>
  27#include <linux/writeback.h>
  28#include <linux/backing-dev.h>
  29#include <linux/pagevec.h>
  30#include <linux/blkdev.h>
  31#include <linux/security.h>
  32#include <linux/cpuset.h>
  33#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
  34#include <linux/hugetlb.h>
  35#include <linux/memcontrol.h>
  36#include <linux/cleancache.h>
  37#include <linux/rmap.h>
 
 
 
 
 
 
 
 
 
 
  38#include "internal.h"
  39
  40#define CREATE_TRACE_POINTS
  41#include <trace/events/filemap.h>
  42
  43/*
  44 * FIXME: remove all knowledge of the buffer layer from the core VM
  45 */
  46#include <linux/buffer_head.h> /* for try_to_free_buffers */
  47
  48#include <asm/mman.h>
  49
 
 
  50/*
  51 * Shared mappings implemented 30.11.1994. It's not fully working yet,
  52 * though.
  53 *
  54 * Shared mappings now work. 15.8.1995  Bruno.
  55 *
  56 * finished 'unifying' the page and buffer cache and SMP-threaded the
  57 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  58 *
  59 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  60 */
  61
  62/*
  63 * Lock ordering:
  64 *
  65 *  ->i_mmap_rwsem		(truncate_pagecache)
  66 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  67 *      ->swap_lock		(exclusive_swap_page, others)
  68 *        ->mapping->tree_lock
  69 *
  70 *  ->i_mutex
  71 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 
  72 *
  73 *  ->mmap_sem
  74 *    ->i_mmap_rwsem
  75 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  76 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  77 *
  78 *  ->mmap_sem
  79 *    ->lock_page		(access_process_vm)
 
  80 *
  81 *  ->i_mutex			(generic_perform_write)
  82 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
  83 *
  84 *  bdi->wb.list_lock
  85 *    sb_lock			(fs/fs-writeback.c)
  86 *    ->mapping->tree_lock	(__sync_single_inode)
  87 *
  88 *  ->i_mmap_rwsem
  89 *    ->anon_vma.lock		(vma_adjust)
  90 *
  91 *  ->anon_vma.lock
  92 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  93 *
  94 *  ->page_table_lock or pte_lock
  95 *    ->swap_lock		(try_to_unmap_one)
  96 *    ->private_lock		(try_to_unmap_one)
  97 *    ->tree_lock		(try_to_unmap_one)
  98 *    ->zone_lru_lock(zone)	(follow_page->mark_page_accessed)
  99 *    ->zone_lru_lock(zone)	(check_pte_range->isolate_lru_page)
 100 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 101 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 102 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 103 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 104 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 105 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 106 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 107 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 108 *
 109 * ->i_mmap_rwsem
 110 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 111 */
 112
 113static int page_cache_tree_insert(struct address_space *mapping,
 114				  struct page *page, void **shadowp)
 115{
 116	struct radix_tree_node *node;
 117	void **slot;
 118	int error;
 
 
 
 
 119
 120	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
 121				    &node, &slot);
 122	if (error)
 123		return error;
 124	if (*slot) {
 125		void *p;
 126
 127		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
 128		if (!radix_tree_exceptional_entry(p))
 129			return -EEXIST;
 130
 131		mapping->nrexceptional--;
 132		if (!dax_mapping(mapping)) {
 133			if (shadowp)
 134				*shadowp = p;
 135		} else {
 136			/* DAX can replace empty locked entry with a hole */
 137			WARN_ON_ONCE(p !=
 138				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
 139			/* Wakeup waiters for exceptional entry lock */
 140			dax_wake_mapping_entry_waiter(mapping, page->index, p,
 141						      true);
 142		}
 143	}
 144	__radix_tree_replace(&mapping->page_tree, node, slot, page,
 145			     workingset_update_node, mapping);
 146	mapping->nrpages++;
 147	return 0;
 148}
 149
 150static void page_cache_tree_delete(struct address_space *mapping,
 151				   struct page *page, void *shadow)
 152{
 153	int i, nr;
 154
 155	/* hugetlb pages are represented by one entry in the radix tree */
 156	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
 
 
 
 
 
 157
 158	VM_BUG_ON_PAGE(!PageLocked(page), page);
 159	VM_BUG_ON_PAGE(PageTail(page), page);
 160	VM_BUG_ON_PAGE(nr != 1 && shadow, page);
 161
 162	for (i = 0; i < nr; i++) {
 163		struct radix_tree_node *node;
 164		void **slot;
 
 
 
 
 
 
 
 
 
 165
 166		__radix_tree_lookup(&mapping->page_tree, page->index + i,
 167				    &node, &slot);
 
 168
 169		VM_BUG_ON_PAGE(!node && nr != 1, page);
 170
 171		radix_tree_clear_tags(&mapping->page_tree, node, slot);
 172		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
 173				     workingset_update_node, mapping);
 
 
 
 
 
 174	}
 175
 176	if (shadow) {
 177		mapping->nrexceptional += nr;
 178		/*
 179		 * Make sure the nrexceptional update is committed before
 180		 * the nrpages update so that final truncate racing
 181		 * with reclaim does not see both counters 0 at the
 182		 * same time and miss a shadow entry.
 183		 */
 184		smp_wmb();
 185	}
 186	mapping->nrpages -= nr;
 
 
 
 
 
 
 187}
 188
 189/*
 190 * Delete a page from the page cache and free it. Caller has to make
 191 * sure the page is locked and that nobody else uses it - or that usage
 192 * is safe.  The caller must hold the mapping's tree_lock.
 193 */
 194void __delete_from_page_cache(struct page *page, void *shadow)
 
 
 
 
 
 
 
 
 
 195{
 196	struct address_space *mapping = page->mapping;
 197	int nr = hpage_nr_pages(page);
 
 
 
 
 
 
 
 
 
 198
 199	trace_mm_filemap_delete_from_page_cache(page);
 200	/*
 201	 * if we're uptodate, flush out into the cleancache, otherwise
 202	 * invalidate any existing cleancache entries.  We can't leave
 203	 * stale data around in the cleancache once our page is gone
 204	 */
 205	if (PageUptodate(page) && PageMappedToDisk(page))
 206		cleancache_put_page(page);
 207	else
 208		cleancache_invalidate_page(mapping, page);
 
 
 
 
 
 
 
 
 
 
 209
 210	VM_BUG_ON_PAGE(PageTail(page), page);
 211	VM_BUG_ON_PAGE(page_mapped(page), page);
 212	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
 213		int mapcount;
 214
 215		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
 216			 current->comm, page_to_pfn(page));
 217		dump_page(page, "still mapped when deleted");
 218		dump_stack();
 219		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 220
 221		mapcount = page_mapcount(page);
 222		if (mapping_exiting(mapping) &&
 223		    page_count(page) >= mapcount + 2) {
 224			/*
 225			 * All vmas have already been torn down, so it's
 226			 * a good bet that actually the page is unmapped,
 227			 * and we'd prefer not to leak it: if we're wrong,
 228			 * some other bad page check should catch it later.
 229			 */
 230			page_mapcount_reset(page);
 231			page_ref_sub(page, mapcount);
 
 
 
 232		}
 233	}
 234
 235	page_cache_tree_delete(mapping, page, shadow);
 236
 237	page->mapping = NULL;
 238	/* Leave page->index set: truncation lookup relies upon it */
 239
 240	/* hugetlb pages do not participate in page cache accounting. */
 241	if (!PageHuge(page))
 242		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 243	if (PageSwapBacked(page)) {
 244		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
 245		if (PageTransHuge(page))
 246			__dec_node_page_state(page, NR_SHMEM_THPS);
 247	} else {
 248		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
 249	}
 250
 251	/*
 252	 * At this point page must be either written or cleaned by truncate.
 253	 * Dirty page here signals a bug and loss of unwritten data.
 254	 *
 255	 * This fixes dirty accounting after removing the page entirely but
 256	 * leaves PageDirty set: it has no effect for truncated page and
 257	 * anyway will be cleared before returning page into buddy allocator.
 258	 */
 259	if (WARN_ON_ONCE(PageDirty(page)))
 260		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
 261}
 262
 263/**
 264 * delete_from_page_cache - delete page from page cache
 265 * @page: the page which the kernel is trying to remove from page cache
 266 *
 267 * This must be called only on pages that have been verified to be in the page
 268 * cache and locked.  It will never put the page into the free list, the caller
 269 * has a reference on the page.
 270 */
 271void delete_from_page_cache(struct page *page)
 272{
 273	struct address_space *mapping = page_mapping(page);
 274	unsigned long flags;
 275	void (*freepage)(struct page *);
 276
 277	BUG_ON(!PageLocked(page));
 278
 279	freepage = mapping->a_ops->freepage;
 280
 281	spin_lock_irqsave(&mapping->tree_lock, flags);
 282	__delete_from_page_cache(page, NULL);
 283	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 
 
 
 
 
 
 
 
 
 
 284
 285	if (freepage)
 286		freepage(page);
 287
 288	if (PageTransHuge(page) && !PageHuge(page)) {
 289		page_ref_sub(page, HPAGE_PMD_NR);
 290		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
 291	} else {
 292		put_page(page);
 293	}
 294}
 295EXPORT_SYMBOL(delete_from_page_cache);
 296
 297int filemap_check_errors(struct address_space *mapping)
 298{
 299	int ret = 0;
 300	/* Check for outstanding write errors */
 301	if (test_bit(AS_ENOSPC, &mapping->flags) &&
 302	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 303		ret = -ENOSPC;
 304	if (test_bit(AS_EIO, &mapping->flags) &&
 305	    test_and_clear_bit(AS_EIO, &mapping->flags))
 306		ret = -EIO;
 307	return ret;
 308}
 309EXPORT_SYMBOL(filemap_check_errors);
 310
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 311/**
 312 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 313 * @mapping:	address space structure to write
 314 * @start:	offset in bytes where the range starts
 315 * @end:	offset in bytes where the range ends (inclusive)
 316 * @sync_mode:	enable synchronous operation
 317 *
 318 * Start writeback against all of a mapping's dirty pages that lie
 319 * within the byte offsets <start, end> inclusive.
 320 *
 321 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 322 * opposed to a regular memory cleansing writeback.  The difference between
 323 * these two operations is that if a dirty page/buffer is encountered, it must
 324 * be waited upon, and not just skipped over.
 
 
 325 */
 326int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 327				loff_t end, int sync_mode)
 328{
 329	int ret;
 330	struct writeback_control wbc = {
 331		.sync_mode = sync_mode,
 332		.nr_to_write = LONG_MAX,
 333		.range_start = start,
 334		.range_end = end,
 335	};
 336
 337	if (!mapping_cap_writeback_dirty(mapping))
 338		return 0;
 339
 340	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
 341	ret = do_writepages(mapping, &wbc);
 342	wbc_detach_inode(&wbc);
 343	return ret;
 344}
 345
 346static inline int __filemap_fdatawrite(struct address_space *mapping,
 347	int sync_mode)
 348{
 349	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 350}
 351
 352int filemap_fdatawrite(struct address_space *mapping)
 353{
 354	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
 355}
 356EXPORT_SYMBOL(filemap_fdatawrite);
 357
 358int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 359				loff_t end)
 360{
 361	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 362}
 363EXPORT_SYMBOL(filemap_fdatawrite_range);
 364
 365/**
 366 * filemap_flush - mostly a non-blocking flush
 367 * @mapping:	target address_space
 368 *
 369 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 370 * purposes - I/O may not be started against all dirty pages.
 
 
 371 */
 372int filemap_flush(struct address_space *mapping)
 373{
 374	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 375}
 376EXPORT_SYMBOL(filemap_flush);
 377
 378static int __filemap_fdatawait_range(struct address_space *mapping,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 379				     loff_t start_byte, loff_t end_byte)
 380{
 381	pgoff_t index = start_byte >> PAGE_SHIFT;
 382	pgoff_t end = end_byte >> PAGE_SHIFT;
 383	struct pagevec pvec;
 384	int nr_pages;
 385	int ret = 0;
 386
 387	if (end_byte < start_byte)
 388		goto out;
 389
 390	pagevec_init(&pvec, 0);
 391	while ((index <= end) &&
 392			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 393			PAGECACHE_TAG_WRITEBACK,
 394			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
 395		unsigned i;
 396
 397		for (i = 0; i < nr_pages; i++) {
 398			struct page *page = pvec.pages[i];
 
 
 
 399
 400			/* until radix tree lookup accepts end_index */
 401			if (page->index > end)
 402				continue;
 403
 404			wait_on_page_writeback(page);
 405			if (TestClearPageError(page))
 406				ret = -EIO;
 407		}
 408		pagevec_release(&pvec);
 409		cond_resched();
 410	}
 411out:
 412	return ret;
 413}
 414
 415/**
 416 * filemap_fdatawait_range - wait for writeback to complete
 417 * @mapping:		address space structure to wait for
 418 * @start_byte:		offset in bytes where the range starts
 419 * @end_byte:		offset in bytes where the range ends (inclusive)
 420 *
 421 * Walk the list of under-writeback pages of the given address space
 422 * in the given range and wait for all of them.  Check error status of
 423 * the address space and return it.
 424 *
 425 * Since the error status of the address space is cleared by this function,
 426 * callers are responsible for checking the return value and handling and/or
 427 * reporting the error.
 
 
 428 */
 429int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 430			    loff_t end_byte)
 431{
 432	int ret, ret2;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 433
 434	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
 435	ret2 = filemap_check_errors(mapping);
 436	if (!ret)
 437		ret = ret2;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 438
 439	return ret;
 
 440}
 441EXPORT_SYMBOL(filemap_fdatawait_range);
 442
 443/**
 444 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 445 * @mapping: address space structure to wait for
 446 *
 447 * Walk the list of under-writeback pages of the given address space
 448 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 449 * does not clear error status of the address space.
 450 *
 451 * Use this function if callers don't handle errors themselves.  Expected
 452 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 453 * fsfreeze(8)
 
 
 454 */
 455void filemap_fdatawait_keep_errors(struct address_space *mapping)
 456{
 457	loff_t i_size = i_size_read(mapping->host);
 
 
 
 458
 459	if (i_size == 0)
 460		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 461
 462	__filemap_fdatawait_range(mapping, 0, i_size - 1);
 
 
 
 
 
 
 
 
 
 
 
 463}
 
 464
 465/**
 466 * filemap_fdatawait - wait for all under-writeback pages to complete
 467 * @mapping: address space structure to wait for
 
 
 
 
 468 *
 469 * Walk the list of under-writeback pages of the given address space
 470 * and wait for all of them.  Check error status of the address space
 471 * and return it.
 472 *
 473 * Since the error status of the address space is cleared by this function,
 474 * callers are responsible for checking the return value and handling and/or
 475 * reporting the error.
 476 */
 477int filemap_fdatawait(struct address_space *mapping)
 
 478{
 479	loff_t i_size = i_size_read(mapping->host);
 480
 481	if (i_size == 0)
 482		return 0;
 483
 484	return filemap_fdatawait_range(mapping, 0, i_size - 1);
 485}
 486EXPORT_SYMBOL(filemap_fdatawait);
 487
 488int filemap_write_and_wait(struct address_space *mapping)
 489{
 490	int err = 0;
 491
 492	if ((!dax_mapping(mapping) && mapping->nrpages) ||
 493	    (dax_mapping(mapping) && mapping->nrexceptional)) {
 494		err = filemap_fdatawrite(mapping);
 495		/*
 496		 * Even if the above returned error, the pages may be
 497		 * written partially (e.g. -ENOSPC), so we wait for it.
 498		 * But the -EIO is special case, it may indicate the worst
 499		 * thing (e.g. bug) happened, so we avoid waiting for it.
 500		 */
 501		if (err != -EIO) {
 502			int err2 = filemap_fdatawait(mapping);
 503			if (!err)
 504				err = err2;
 505		}
 506	} else {
 507		err = filemap_check_errors(mapping);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 508	}
 
 
 
 
 
 
 
 
 509	return err;
 510}
 511EXPORT_SYMBOL(filemap_write_and_wait);
 512
 513/**
 514 * filemap_write_and_wait_range - write out & wait on a file range
 515 * @mapping:	the address_space for the pages
 516 * @lstart:	offset in bytes where the range starts
 517 * @lend:	offset in bytes where the range ends (inclusive)
 518 *
 519 * Write out and wait upon file offsets lstart->lend, inclusive.
 520 *
 521 * Note that `lend' is inclusive (describes the last byte to be written) so
 522 * that this function can be used to write to the very end-of-file (end = -1).
 
 
 
 
 
 523 */
 524int filemap_write_and_wait_range(struct address_space *mapping,
 525				 loff_t lstart, loff_t lend)
 526{
 527	int err = 0;
 
 
 
 
 528
 529	if ((!dax_mapping(mapping) && mapping->nrpages) ||
 530	    (dax_mapping(mapping) && mapping->nrexceptional)) {
 531		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 532						 WB_SYNC_ALL);
 533		/* See comment of filemap_write_and_wait() */
 534		if (err != -EIO) {
 535			int err2 = filemap_fdatawait_range(mapping,
 536						lstart, lend);
 537			if (!err)
 538				err = err2;
 539		}
 540	} else {
 541		err = filemap_check_errors(mapping);
 542	}
 
 
 
 543	return err;
 544}
 545EXPORT_SYMBOL(filemap_write_and_wait_range);
 546
 547/**
 548 * replace_page_cache_page - replace a pagecache page with a new one
 549 * @old:	page to be replaced
 550 * @new:	page to replace with
 551 * @gfp_mask:	allocation mode
 552 *
 553 * This function replaces a page in the pagecache with a new one.  On
 554 * success it acquires the pagecache reference for the new page and
 555 * drops it for the old page.  Both the old and new pages must be
 556 * locked.  This function does not add the new page to the LRU, the
 557 * caller must do that.
 558 *
 559 * The remove + add is atomic.  The only way this function can fail is
 560 * memory allocation failure.
 561 */
 562int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 563{
 564	int error;
 
 
 
 
 
 
 
 565
 566	VM_BUG_ON_PAGE(!PageLocked(old), old);
 567	VM_BUG_ON_PAGE(!PageLocked(new), new);
 568	VM_BUG_ON_PAGE(new->mapping, new);
 569
 570	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 571	if (!error) {
 572		struct address_space *mapping = old->mapping;
 573		void (*freepage)(struct page *);
 574		unsigned long flags;
 575
 576		pgoff_t offset = old->index;
 577		freepage = mapping->a_ops->freepage;
 578
 579		get_page(new);
 580		new->mapping = mapping;
 581		new->index = offset;
 582
 583		spin_lock_irqsave(&mapping->tree_lock, flags);
 584		__delete_from_page_cache(old, NULL);
 585		error = page_cache_tree_insert(mapping, new, NULL);
 586		BUG_ON(error);
 587
 588		/*
 589		 * hugetlb pages do not participate in page cache accounting.
 590		 */
 591		if (!PageHuge(new))
 592			__inc_node_page_state(new, NR_FILE_PAGES);
 593		if (PageSwapBacked(new))
 594			__inc_node_page_state(new, NR_SHMEM);
 595		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 596		mem_cgroup_migrate(old, new);
 597		radix_tree_preload_end();
 598		if (freepage)
 599			freepage(old);
 600		put_page(old);
 601	}
 602
 603	return error;
 604}
 605EXPORT_SYMBOL_GPL(replace_page_cache_page);
 606
 607static int __add_to_page_cache_locked(struct page *page,
 608				      struct address_space *mapping,
 609				      pgoff_t offset, gfp_t gfp_mask,
 610				      void **shadowp)
 611{
 612	int huge = PageHuge(page);
 613	struct mem_cgroup *memcg;
 614	int error;
 615
 616	VM_BUG_ON_PAGE(!PageLocked(page), page);
 617	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 618
 619	if (!huge) {
 620		error = mem_cgroup_try_charge(page, current->mm,
 621					      gfp_mask, &memcg, false);
 622		if (error)
 623			return error;
 
 624	}
 625
 626	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
 627	if (error) {
 628		if (!huge)
 629			mem_cgroup_cancel_charge(page, memcg, false);
 630		return error;
 631	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 632
 633	get_page(page);
 634	page->mapping = mapping;
 635	page->index = offset;
 636
 637	spin_lock_irq(&mapping->tree_lock);
 638	error = page_cache_tree_insert(mapping, page, shadowp);
 639	radix_tree_preload_end();
 640	if (unlikely(error))
 641		goto err_insert;
 642
 643	/* hugetlb pages do not participate in page cache accounting. */
 644	if (!huge)
 645		__inc_node_page_state(page, NR_FILE_PAGES);
 646	spin_unlock_irq(&mapping->tree_lock);
 647	if (!huge)
 648		mem_cgroup_commit_charge(page, memcg, false, false);
 649	trace_mm_filemap_add_to_page_cache(page);
 650	return 0;
 651err_insert:
 652	page->mapping = NULL;
 
 
 653	/* Leave page->index set: truncation relies upon it */
 654	spin_unlock_irq(&mapping->tree_lock);
 655	if (!huge)
 656		mem_cgroup_cancel_charge(page, memcg, false);
 657	put_page(page);
 658	return error;
 659}
 
 660
 661/**
 662 * add_to_page_cache_locked - add a locked page to the pagecache
 663 * @page:	page to add
 664 * @mapping:	the page's address_space
 665 * @offset:	page index
 666 * @gfp_mask:	page allocation mode
 667 *
 668 * This function is used to add a page to the pagecache. It must be locked.
 669 * This function does not add the page to the LRU.  The caller must do that.
 670 */
 671int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 672		pgoff_t offset, gfp_t gfp_mask)
 673{
 674	return __add_to_page_cache_locked(page, mapping, offset,
 675					  gfp_mask, NULL);
 676}
 677EXPORT_SYMBOL(add_to_page_cache_locked);
 678
 679int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 680				pgoff_t offset, gfp_t gfp_mask)
 681{
 682	void *shadow = NULL;
 683	int ret;
 684
 685	__SetPageLocked(page);
 686	ret = __add_to_page_cache_locked(page, mapping, offset,
 687					 gfp_mask, &shadow);
 688	if (unlikely(ret))
 689		__ClearPageLocked(page);
 690	else {
 691		/*
 692		 * The page might have been evicted from cache only
 693		 * recently, in which case it should be activated like
 694		 * any other repeatedly accessed page.
 695		 * The exception is pages getting rewritten; evicting other
 696		 * data from the working set, only to cache data that will
 697		 * get overwritten with something else, is a waste of memory.
 698		 */
 699		if (!(gfp_mask & __GFP_WRITE) &&
 700		    shadow && workingset_refault(shadow)) {
 701			SetPageActive(page);
 702			workingset_activation(page);
 703		} else
 704			ClearPageActive(page);
 705		lru_cache_add(page);
 706	}
 707	return ret;
 708}
 709EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 710
 711#ifdef CONFIG_NUMA
 712struct page *__page_cache_alloc(gfp_t gfp)
 713{
 714	int n;
 715	struct page *page;
 716
 717	if (cpuset_do_page_mem_spread()) {
 718		unsigned int cpuset_mems_cookie;
 719		do {
 720			cpuset_mems_cookie = read_mems_allowed_begin();
 721			n = cpuset_mem_spread_node();
 722			page = __alloc_pages_node(n, gfp, 0);
 723		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 724
 725		return page;
 726	}
 727	return alloc_pages(gfp, 0);
 728}
 729EXPORT_SYMBOL(__page_cache_alloc);
 730#endif
 731
 732/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 733 * In order to wait for pages to become available there must be
 734 * waitqueues associated with pages. By using a hash table of
 735 * waitqueues where the bucket discipline is to maintain all
 736 * waiters on the same queue and wake all when any of the pages
 737 * become available, and for the woken contexts to check to be
 738 * sure the appropriate page became available, this saves space
 739 * at a cost of "thundering herd" phenomena during rare hash
 740 * collisions.
 741 */
 742#define PAGE_WAIT_TABLE_BITS 8
 743#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
 744static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
 745
 746static wait_queue_head_t *page_waitqueue(struct page *page)
 747{
 748	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
 749}
 750
 751void __init pagecache_init(void)
 752{
 753	int i;
 754
 755	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
 756		init_waitqueue_head(&page_wait_table[i]);
 757
 758	page_writeback_init();
 759}
 760
 761struct wait_page_key {
 762	struct page *page;
 763	int bit_nr;
 764	int page_match;
 765};
 766
 767struct wait_page_queue {
 768	struct page *page;
 769	int bit_nr;
 770	wait_queue_t wait;
 771};
 772
 773static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 774{
 
 775	struct wait_page_key *key = arg;
 776	struct wait_page_queue *wait_page
 777		= container_of(wait, struct wait_page_queue, wait);
 778
 779	if (wait_page->page != key->page)
 780	       return 0;
 781	key->page_match = 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 782
 783	if (wait_page->bit_nr != key->bit_nr)
 784		return 0;
 785	if (test_bit(key->bit_nr, &key->page->flags))
 786		return 0;
 
 
 
 
 
 
 
 787
 788	return autoremove_wake_function(wait, mode, sync, key);
 
 
 
 
 
 
 
 
 
 
 
 789}
 790
 791void wake_up_page_bit(struct page *page, int bit_nr)
 792{
 793	wait_queue_head_t *q = page_waitqueue(page);
 794	struct wait_page_key key;
 795	unsigned long flags;
 796
 797	key.page = page;
 798	key.bit_nr = bit_nr;
 799	key.page_match = 0;
 800
 801	spin_lock_irqsave(&q->lock, flags);
 802	__wake_up_locked_key(q, TASK_NORMAL, &key);
 
 803	/*
 804	 * It is possible for other pages to have collided on the waitqueue
 805	 * hash, so in that case check for a page match. That prevents a long-
 806	 * term waiter
 807	 *
 808	 * It is still possible to miss a case here, when we woke page waiters
 809	 * and removed them from the waitqueue, but there are still other
 810	 * page waiters.
 811	 */
 812	if (!waitqueue_active(q) || !key.page_match) {
 813		ClearPageWaiters(page);
 814		/*
 815		 * It's possible to miss clearing Waiters here, when we woke
 816		 * our page waiters, but the hashed waitqueue has waiters for
 817		 * other pages on it.
 818		 *
 819		 * That's okay, it's a rare case. The next waker will clear it.
 820		 */
 821	}
 822	spin_unlock_irqrestore(&q->lock, flags);
 823}
 824EXPORT_SYMBOL(wake_up_page_bit);
 825
 826static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 827		struct page *page, int bit_nr, int state, bool lock)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 828{
 
 
 829	struct wait_page_queue wait_page;
 830	wait_queue_t *wait = &wait_page.wait;
 831	int ret = 0;
 
 
 
 
 
 
 
 
 
 832
 833	init_wait(wait);
 834	wait->func = wake_page_function;
 835	wait_page.page = page;
 836	wait_page.bit_nr = bit_nr;
 837
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 838	for (;;) {
 839		spin_lock_irq(&q->lock);
 840
 841		if (likely(list_empty(&wait->task_list))) {
 842			if (lock)
 843				__add_wait_queue_tail_exclusive(q, wait);
 844			else
 845				__add_wait_queue(q, wait);
 846			SetPageWaiters(page);
 847		}
 848
 849		set_current_state(state);
 850
 851		spin_unlock_irq(&q->lock);
 
 
 
 
 852
 853		if (likely(test_bit(bit_nr, &page->flags))) {
 854			io_schedule();
 855			if (unlikely(signal_pending_state(state, current))) {
 856				ret = -EINTR;
 857				break;
 858			}
 859		}
 860
 861		if (lock) {
 862			if (!test_and_set_bit_lock(bit_nr, &page->flags))
 863				break;
 864		} else {
 865			if (!test_bit(bit_nr, &page->flags))
 866				break;
 867		}
 868	}
 869
 870	finish_wait(q, wait);
 871
 872	/*
 873	 * A signal could leave PageWaiters set. Clearing it here if
 874	 * !waitqueue_active would be possible (by open-coding finish_wait),
 875	 * but still fail to catch it in the case of wait hash collision. We
 876	 * already can fail to clear wait hash collision cases, so don't
 877	 * bother with signals either.
 878	 */
 879
 880	return ret;
 881}
 
 882
 883void wait_on_page_bit(struct page *page, int bit_nr)
 884{
 885	wait_queue_head_t *q = page_waitqueue(page);
 886	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
 887}
 888EXPORT_SYMBOL(wait_on_page_bit);
 889
 890int wait_on_page_bit_killable(struct page *page, int bit_nr)
 891{
 892	wait_queue_head_t *q = page_waitqueue(page);
 893	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
 894}
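/*
 * Example (illustrative sketch): the wait_on_page_locked() helpers declared
 * in <linux/pagemap.h> are thin wrappers around these primitives, roughly:
 *
 *	static inline void wait_on_page_locked(struct page *page)
 *	{
 *		if (PageLocked(page))
 *			wait_on_page_bit(compound_head(page), PG_locked);
 *	}
 *
 * The PageLocked() pre-check avoids touching the hashed waitqueue at all in
 * the common, already-unlocked case.
 */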
 
 895
 896/**
 897 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 898 * @page: Page defining the wait queue of interest
 899 * @waiter: Waiter to add to the queue
 900 *
 901 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 902 */
 903void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 904{
 905	wait_queue_head_t *q = page_waitqueue(page);
 906	unsigned long flags;
 907
 908	spin_lock_irqsave(&q->lock, flags);
 909	__add_wait_queue(q, waiter);
 910	SetPageWaiters(page);
 911	spin_unlock_irqrestore(&q->lock, flags);
 912}
 913EXPORT_SYMBOL_GPL(add_page_wait_queue);
 914
 915#ifndef clear_bit_unlock_is_negative_byte
 916
 917/*
 918 * PG_waiters is the high bit in the same byte as PG_locked.
 
 919 *
 920 * On x86 (and on many other architectures), we can clear PG_lock and
 921 * test the sign bit at the same time. But if the architecture does
 922 * not support that special operation, we just do this all by hand
 923 * instead.
 924 *
 925 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 926 * being cleared, but a memory barrier should be unnecessary since it is
 927 * in the same byte as PG_locked.
 928 */
 929static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
 930{
 931	clear_bit_unlock(nr, mem);
 932	/* smp_mb__after_atomic(); */
 933	return test_bit(PG_waiters, mem);
 
 934}
 935
 936#endif
 937
 938/**
 939 * unlock_page - unlock a locked page
 940 * @page: the page
 941 *
 942 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 943 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 944 * mechanism between PageLocked pages and PageWriteback pages is shared.
 945 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 946 *
 947 * Note that this depends on PG_waiters being the sign bit in the byte
 948 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 949 * clear the PG_locked bit and test PG_waiters at the same time fairly
 950 * portably (architectures that do LL/SC can test any bit, while x86 can
 951 * test the sign bit).
 952 */
 953void unlock_page(struct page *page)
 954{
 955	BUILD_BUG_ON(PG_waiters != 7);
 956	page = compound_head(page);
 957	VM_BUG_ON_PAGE(!PageLocked(page), page);
 958	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
 959		wake_up_page_bit(page, PG_locked);
 960}
 961EXPORT_SYMBOL(unlock_page);
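/*
 * Example (illustrative sketch of the usual pairing in callers): lock_page()
 * ends up in wait_on_page_bit_common() via __lock_page() when the bit is
 * already set, and unlock_page() provides the matching wakeup:
 *
 *	lock_page(page);
 *	... inspect or update page state under PG_locked ...
 *	unlock_page(page);
 */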
 962
 963/**
 964 * end_page_writeback - end writeback against a page
 965 * @page: the page
 966 */
 967void end_page_writeback(struct page *page)
 968{
 969	/*
 970	 * TestClearPageReclaim could be used here but it is an atomic
 971	 * operation and overkill in this particular case. Failing to
 972	 * shuffle a page marked for immediate reclaim is too mild to
 973	 * justify taking an atomic operation penalty at the end of
 974	 * every page writeback.
 975	 */
 976	if (PageReclaim(page)) {
 977		ClearPageReclaim(page);
 978		rotate_reclaimable_page(page);
 979	}
 980
 981	if (!test_clear_page_writeback(page))
 982		BUG();
 983
 984	smp_mb__after_atomic();
 985	wake_up_page(page, PG_writeback);
 986}
 987EXPORT_SYMBOL(end_page_writeback);
 988
 989/*
 990 * After completing I/O on a page, call this routine to update the page
 991 * flags appropriately
 992 */
 993void page_endio(struct page *page, bool is_write, int err)
 994{
 995	if (!is_write) {
 996		if (!err) {
 997			SetPageUptodate(page);
 998		} else {
 999			ClearPageUptodate(page);
1000			SetPageError(page);
1001		}
1002		unlock_page(page);
1003	} else {
1004		if (err) {
1005			struct address_space *mapping;
1006
1007			SetPageError(page);
1008			mapping = page_mapping(page);
1009			if (mapping)
1010				mapping_set_error(mapping, err);
1011		}
1012		end_page_writeback(page);
1013	}
1014}
1015EXPORT_SYMBOL_GPL(page_endio);
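/*
 * Example (illustrative sketch modelled on a typical bio completion handler;
 * the name example_end_io is hypothetical): per-page completion can be
 * funnelled through page_endio() so read and write endio paths share one
 * helper:
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		struct bio_vec *bvec;
 *		int i;
 *
 *		bio_for_each_segment_all(bvec, bio, i)
 *			page_endio(bvec->bv_page, op_is_write(bio_op(bio)),
 *				   bio->bi_error);
 *		bio_put(bio);
 *	}
 */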
1016
1017/**
1018 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1019 * @page: the page to lock
1020 */
1021void __lock_page(struct page *__page)
1022{
1023	struct page *page = compound_head(__page);
1024	wait_queue_head_t *q = page_waitqueue(page);
1025	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
1026}
1027EXPORT_SYMBOL(__lock_page);
1028
1029int __lock_page_killable(struct page *__page)
1030{
1031	struct page *page = compound_head(__page);
1032	wait_queue_head_t *q = page_waitqueue(page);
1033	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
1034}
1035EXPORT_SYMBOL_GPL(__lock_page_killable);
1036
1037/*
1038 * Return values:
1039 * 1 - page is locked; mmap_sem is still held.
1040 * 0 - page is not locked.
1041 *     mmap_sem has been released (up_read()), unless flags had both
1042 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1043 *     which case mmap_sem is still held.
1044 *
1045 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
1046 * with the page locked and the mmap_sem unperturbed.
1047 */
1048int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
1049			 unsigned int flags)
1050{
1051	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 
 
1052		/*
1053		 * CAUTION! In this case, mmap_sem is not released
1054		 * even though return 0.
1055		 */
1056		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1057			return 0;
1058
1059		up_read(&mm->mmap_sem);
1060		if (flags & FAULT_FLAG_KILLABLE)
1061			wait_on_page_locked_killable(page);
1062		else
1063			wait_on_page_locked(page);
1064		return 0;
1065	} else {
1066		if (flags & FAULT_FLAG_KILLABLE) {
1067			int ret;
1068
1069			ret = __lock_page_killable(page);
1070			if (ret) {
1071				up_read(&mm->mmap_sem);
1072				return 0;
1073			}
1074		} else
1075			__lock_page(page);
1076		return 1;
1077	}
1078}
1079
1080/**
1081 * page_cache_next_hole - find the next hole (not-present entry)
1082 * @mapping: mapping
1083 * @index: index
1084 * @max_scan: maximum range to search
1085 *
1086 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
1087 * lowest indexed hole.
1088 *
1089 * Returns: the index of the hole if found, otherwise returns an index
1090 * outside of the set specified (in which case 'return - index >=
1091 * max_scan' will be true). In rare cases of index wrap-around, 0 will
1092 * be returned.
1093 *
1094 * page_cache_next_hole may be called under rcu_read_lock. However,
1095 * like radix_tree_gang_lookup, this will not atomically search a
1096 * snapshot of the tree at a single point in time. For example, if a
1097 * hole is created at index 5, then subsequently a hole is created at
1098 * index 10, page_cache_next_hole covering both indexes may return 10
1099 * if called under rcu_read_lock.
1100 */
1101pgoff_t page_cache_next_hole(struct address_space *mapping,
1102			     pgoff_t index, unsigned long max_scan)
1103{
1104	unsigned long i;
1105
1106	for (i = 0; i < max_scan; i++) {
1107		struct page *page;
1108
1109		page = radix_tree_lookup(&mapping->page_tree, index);
1110		if (!page || radix_tree_exceptional_entry(page))
 
1111			break;
1112		index++;
1113		if (index == 0)
1114			break;
1115	}
1116
1117	return index;
1118}
1119EXPORT_SYMBOL(page_cache_next_hole);
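/*
 * Example (illustrative sketch; the window of 64 slots is arbitrary): probe
 * for the first gap after @index, e.g. to decide how far a contiguous batch
 * can extend:
 *
 *	pgoff_t hole = page_cache_next_hole(mapping, index, 64);
 *
 *	if (hole - index >= 64)
 *		... every slot in [index, index + 63] is occupied ...
 *	else
 *		... 'hole' is the first not-present index in that window ...
 */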
1120
1121/**
1122 * page_cache_prev_hole - find the prev hole (not-present entry)
1123 * @mapping: mapping
1124 * @index: index
1125 * @max_scan: maximum range to search
1126 *
1127 * Search backwards in the range [max(index-max_scan+1, 0), index] for
1128 * the first hole.
1129 *
1130 * Returns: the index of the hole if found, otherwise returns an index
1131 * outside of the set specified (in which case 'index - return >=
1132 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
1133 * will be returned.
1134 *
1135 * page_cache_prev_hole may be called under rcu_read_lock. However,
1136 * like radix_tree_gang_lookup, this will not atomically search a
1137 * snapshot of the tree at a single point in time. For example, if a
1138 * hole is created at index 10, then subsequently a hole is created at
1139 * index 5, page_cache_prev_hole covering both indexes may return 5 if
1140 * called under rcu_read_lock.
1141 */
1142pgoff_t page_cache_prev_hole(struct address_space *mapping,
1143			     pgoff_t index, unsigned long max_scan)
1144{
1145	unsigned long i;
1146
1147	for (i = 0; i < max_scan; i++) {
1148		struct page *page;
1149
1150		page = radix_tree_lookup(&mapping->page_tree, index);
1151		if (!page || radix_tree_exceptional_entry(page))
1152			break;
1153		index--;
1154		if (index == ULONG_MAX)
1155			break;
1156	}
1157
1158	return index;
1159}
1160EXPORT_SYMBOL(page_cache_prev_hole);
1161
1162/**
1163 * find_get_entry - find and get a page cache entry
1164 * @mapping: the address_space to search
1165 * @offset: the page cache index
1166 *
1167 * Looks up the page cache slot at @mapping & @offset.  If there is a
1168 * page cache page, it is returned with an increased refcount.
1169 *
1170 * If the slot holds a shadow entry of a previously evicted page, or a
1171 * swap entry from shmem/tmpfs, it is returned.
 
 
1172 *
1173 * Otherwise, %NULL is returned.
1174 */
1175struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
1176{
1177	void **pagep;
1178	struct page *head, *page;
1179
1180	rcu_read_lock();
1181repeat:
1182	page = NULL;
1183	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
1184	if (pagep) {
1185		page = radix_tree_deref_slot(pagep);
1186		if (unlikely(!page))
1187			goto out;
1188		if (radix_tree_exception(page)) {
1189			if (radix_tree_deref_retry(page))
1190				goto repeat;
1191			/*
1192			 * A shadow entry of a recently evicted page,
1193			 * or a swap entry from shmem/tmpfs.  Return
1194			 * it without attempting to raise page count.
1195			 */
1196			goto out;
1197		}
1198
1199		head = compound_head(page);
1200		if (!page_cache_get_speculative(head))
1201			goto repeat;
1202
1203		/* The page was split under us? */
1204		if (compound_head(page) != head) {
1205			put_page(head);
1206			goto repeat;
1207		}
1208
1209		/*
1210		 * Has the page moved?
1211		 * This is part of the lockless pagecache protocol. See
1212		 * include/linux/pagemap.h for details.
1213		 */
1214		if (unlikely(page != *pagep)) {
1215			put_page(head);
1216			goto repeat;
1217		}
1218	}
1219out:
1220	rcu_read_unlock();
1221
1222	return page;
1223}
1224EXPORT_SYMBOL(find_get_entry);
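/*
 * Example (illustrative sketch): callers that only care about real pages
 * must filter out shadow/swap entries before dereferencing the result, the
 * same way pagecache_get_page() below does:
 *
 *	struct page *page = find_get_entry(mapping, index);
 *
 *	if (radix_tree_exceptional_entry(page))
 *		page = NULL;	(shadow or swap entry, no reference held)
 *	if (page) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */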
1225
1226/**
1227 * find_lock_entry - locate, pin and lock a page cache entry
1228 * @mapping: the address_space to search
1229 * @offset: the page cache index
1230 *
1231 * Looks up the page cache slot at @mapping & @offset.  If there is a
1232 * page cache page, it is returned locked and with an increased
1233 * refcount.
1234 *
1235 * If the slot holds a shadow entry of a previously evicted page, or a
1236 * swap entry from shmem/tmpfs, it is returned.
1237 *
1238 * Otherwise, %NULL is returned.
1239 *
1240 * find_lock_entry() may sleep.
1241 */
1242struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
1243{
1244	struct page *page;
1245
1246repeat:
1247	page = find_get_entry(mapping, offset);
1248	if (page && !radix_tree_exception(page)) {
1249		lock_page(page);
1250		/* Has the page been truncated? */
1251		if (unlikely(page_mapping(page) != mapping)) {
1252			unlock_page(page);
1253			put_page(page);
1254			goto repeat;
1255		}
1256		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
1257	}
1258	return page;
1259}
1260EXPORT_SYMBOL(find_lock_entry);
1261
1262/**
1263 * pagecache_get_page - find and get a page reference
1264 * @mapping: the address_space to search
1265 * @offset: the page index
1266 * @fgp_flags: PCG flags
1267 * @gfp_mask: gfp mask to use for the page cache data page allocation
1268 *
1269 * Looks up the page cache slot at @mapping & @offset.
1270 *
1271 * PCG flags modify how the page is returned.
1272 *
1273 * FGP_ACCESSED: the page will be marked accessed
1274 * FGP_LOCK: the page is returned locked
1275 * FGP_CREAT: If page is not present then a new page is allocated using
1276 *		@gfp_mask and added to the page cache and the VM's LRU
1277 *		list. The page is returned locked and with an increased
1278 *		refcount. Otherwise, %NULL is returned.
1279 *
1280 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1281 * if the GFP flags specified for FGP_CREAT are atomic.
1282 *
1283 * If there is a page cache page, it is returned with an increased refcount.
1284 */
1285struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
1286	int fgp_flags, gfp_t gfp_mask)
1287{
1288	struct page *page;
1289
1290repeat:
1291	page = find_get_entry(mapping, offset);
1292	if (radix_tree_exceptional_entry(page))
1293		page = NULL;
1294	if (!page)
1295		goto no_page;
1296
1297	if (fgp_flags & FGP_LOCK) {
1298		if (fgp_flags & FGP_NOWAIT) {
1299			if (!trylock_page(page)) {
1300				put_page(page);
1301				return NULL;
1302			}
1303		} else {
1304			lock_page(page);
1305		}
1306
1307		/* Has the page been truncated? */
1308		if (unlikely(page->mapping != mapping)) {
1309			unlock_page(page);
1310			put_page(page);
1311			goto repeat;
1312		}
1313		VM_BUG_ON_PAGE(page->index != offset, page);
1314	}
1315
1316	if (page && (fgp_flags & FGP_ACCESSED))
1317		mark_page_accessed(page);
1318
 
 
1319no_page:
1320	if (!page && (fgp_flags & FGP_CREAT)) {
 
1321		int err;
1322		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
1323			gfp_mask |= __GFP_WRITE;
 
1324		if (fgp_flags & FGP_NOFS)
1325			gfp_mask &= ~__GFP_FS;
1326
1327		page = __page_cache_alloc(gfp_mask);
1328		if (!page)
1329			return NULL;
1330
1331		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
1332			fgp_flags |= FGP_LOCK;
1333
1334		/* Init accessed so avoid atomic mark_page_accessed later */
1335		if (fgp_flags & FGP_ACCESSED)
1336			__SetPageReferenced(page);
1337
1338		err = add_to_page_cache_lru(page, mapping, offset,
1339				gfp_mask & GFP_RECLAIM_MASK);
1340		if (unlikely(err)) {
1341			put_page(page);
1342			page = NULL;
1343			if (err == -EEXIST)
1344				goto repeat;
1345		}
1346	}
1347
1348	return page;
1349}
1350EXPORT_SYMBOL(pagecache_get_page);
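/*
 * Example (illustrative sketch of a buffered-write style lookup; error
 * handling beyond the allocation failure is omitted):
 *
 *	struct page *page;
 *
 *	page = pagecache_get_page(mapping, index,
 *				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *				  mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... the page is locked and holds an extra reference here ...
 *	unlock_page(page);
 *	put_page(page);
 */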
1351
1352/**
1353 * find_get_entries - gang pagecache lookup
1354 * @mapping:	The address_space to search
1355 * @start:	The starting page cache index
1356 * @nr_entries:	The maximum number of entries
1357 * @entries:	Where the resulting entries are placed
1358 * @indices:	The cache indices corresponding to the entries in @entries
1359 *
1360 * find_get_entries() will search for and return a group of up to
1361 * @nr_entries entries in the mapping.  The entries are placed at
1362 * @entries.  find_get_entries() takes a reference against any actual
1363 * pages it returns.
1364 *
1365 * The search returns a group of mapping-contiguous page cache entries
1366 * with ascending indexes.  There may be holes in the indices due to
1367 * not-present pages.
1368 *
1369 * Any shadow entries of evicted pages, or swap entries from
1370 * shmem/tmpfs, are included in the returned array.
1371 *
1372 * find_get_entries() returns the number of pages and shadow entries
1373 * which were found.
1374 */
1375unsigned find_get_entries(struct address_space *mapping,
1376			  pgoff_t start, unsigned int nr_entries,
1377			  struct page **entries, pgoff_t *indices)
1378{
1379	void **slot;
1380	unsigned int ret = 0;
1381	struct radix_tree_iter iter;
1382
1383	if (!nr_entries)
1384		return 0;
1385
1386	rcu_read_lock();
1387	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1388		struct page *head, *page;
1389repeat:
1390		page = radix_tree_deref_slot(slot);
1391		if (unlikely(!page))
1392			continue;
1393		if (radix_tree_exception(page)) {
1394			if (radix_tree_deref_retry(page)) {
1395				slot = radix_tree_iter_retry(&iter);
1396				continue;
1397			}
1398			/*
1399			 * A shadow entry of a recently evicted page, a swap
1400			 * entry from shmem/tmpfs or a DAX entry.  Return it
1401			 * without attempting to raise page count.
1402			 */
1403			goto export;
1404		}
1405
1406		head = compound_head(page);
1407		if (!page_cache_get_speculative(head))
1408			goto repeat;
1409
1410		/* The page was split under us? */
1411		if (compound_head(page) != head) {
1412			put_page(head);
1413			goto repeat;
1414		}
1415
1416		/* Has the page moved? */
1417		if (unlikely(page != *slot)) {
1418			put_page(head);
1419			goto repeat;
1420		}
1421export:
1422		indices[ret] = iter.index;
1423		entries[ret] = page;
1424		if (++ret == nr_entries)
1425			break;
1426	}
1427	rcu_read_unlock();
1428	return ret;
1429}
1430
1431/**
1432 * find_get_pages - gang pagecache lookup
1433 * @mapping:	The address_space to search
1434 * @start:	The starting page index
1435 * @nr_pages:	The maximum number of pages
1436 * @pages:	Where the resulting pages are placed
1437 *
1438 * find_get_pages() will search for and return a group of up to
1439 * @nr_pages pages in the mapping.  The pages are placed at @pages.
1440 * find_get_pages() takes a reference against the returned pages.
1441 *
1442 * The search returns a group of mapping-contiguous pages with ascending
1443 * indexes.  There may be holes in the indices due to not-present pages.
 
1444 *
1445 * find_get_pages() returns the number of pages which were found.
 
1446 */
1447unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
1448			    unsigned int nr_pages, struct page **pages)
 
1449{
1450	struct radix_tree_iter iter;
1451	void **slot;
1452	unsigned ret = 0;
1453
1454	if (unlikely(!nr_pages))
1455		return 0;
1456
1457	rcu_read_lock();
1458	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1459		struct page *head, *page;
1460repeat:
1461		page = radix_tree_deref_slot(slot);
1462		if (unlikely(!page))
1463			continue;
1464
1465		if (radix_tree_exception(page)) {
1466			if (radix_tree_deref_retry(page)) {
1467				slot = radix_tree_iter_retry(&iter);
1468				continue;
1469			}
1470			/*
1471			 * A shadow entry of a recently evicted page,
1472			 * or a swap entry from shmem/tmpfs.  Skip
1473			 * over it.
1474			 */
1475			continue;
1476		}
1477
1478		head = compound_head(page);
1479		if (!page_cache_get_speculative(head))
1480			goto repeat;
1481
1482		/* The page was split under us? */
1483		if (compound_head(page) != head) {
1484			put_page(head);
1485			goto repeat;
1486		}
1487
1488		/* Has the page moved? */
1489		if (unlikely(page != *slot)) {
1490			put_page(head);
1491			goto repeat;
1492		}
1493
1494		pages[ret] = page;
1495		if (++ret == nr_pages)
1496			break;
1497	}
1498
1499	rcu_read_unlock();
1500	return ret;
1501}
 
1502
1503/**
1504 * find_get_pages_contig - gang contiguous pagecache lookup
1505 * @mapping:	The address_space to search
1506 * @index:	The starting page index
1507 * @nr_pages:	The maximum number of pages
1508 * @pages:	Where the resulting pages are placed
1509 *
1510 * find_get_pages_contig() works exactly like find_get_pages(), except
1511 * that the returned number of pages are guaranteed to be contiguous.
1512 *
1513 * find_get_pages_contig() returns the number of pages which were found.
1514 */
1515unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1516			       unsigned int nr_pages, struct page **pages)
1517{
1518	struct radix_tree_iter iter;
1519	void **slot;
1520	unsigned int ret = 0;
1521
1522	if (unlikely(!nr_pages))
1523		return 0;
1524
1525	rcu_read_lock();
1526	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
1527		struct page *head, *page;
1528repeat:
1529		page = radix_tree_deref_slot(slot);
1530		/* A hole, so there is no reason to continue */
1531		if (unlikely(!page))
1532			break;
1533
1534		if (radix_tree_exception(page)) {
1535			if (radix_tree_deref_retry(page)) {
1536				slot = radix_tree_iter_retry(&iter);
1537				continue;
1538			}
1539			/*
1540			 * A shadow entry of a recently evicted page,
1541			 * or a swap entry from shmem/tmpfs.  Stop
1542			 * looking for contiguous pages.
1543			 */
1544			break;
1545		}
 
1546
1547		head = compound_head(page);
1548		if (!page_cache_get_speculative(head))
1549			goto repeat;
1550
1551		/* The page was split under us? */
1552		if (compound_head(page) != head) {
1553			put_page(head);
1554			goto repeat;
1555		}
1556
1557		/* Has the page moved? */
1558		if (unlikely(page != *slot)) {
1559			put_page(head);
1560			goto repeat;
1561		}
1562
1563		/*
1564		 * must check mapping and index after taking the ref.
1565		 * otherwise we can get both false positives and false
1566		 * negatives, which is just confusing to the caller.
1567		 */
1568		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
1569			put_page(page);
1570			break;
1571		}
1572
1573		pages[ret] = page;
1574		if (++ret == nr_pages)
1575			break;
1576	}
1577	rcu_read_unlock();
1578	return ret;
1579}
1580EXPORT_SYMBOL(find_get_pages_contig);
1581
1582/**
1583 * find_get_pages_tag - find and return pages that match @tag
1584 * @mapping:	the address_space to search
1585 * @index:	the starting page index
1586 * @tag:	the tag index
1587 * @nr_pages:	the maximum number of pages
1588 * @pages:	where the resulting pages are placed
1589 *
1590 * Like find_get_pages, except we only return pages which are tagged with
1591 * @tag.   We update @index to index the next page for the traversal.
1592 */
1593unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
1594			int tag, unsigned int nr_pages, struct page **pages)
1595{
1596	struct radix_tree_iter iter;
1597	void **slot;
1598	unsigned ret = 0;
1599
1600	if (unlikely(!nr_pages))
1601		return 0;
1602
1603	rcu_read_lock();
1604	radix_tree_for_each_tagged(slot, &mapping->page_tree,
1605				   &iter, *index, tag) {
1606		struct page *head, *page;
1607repeat:
1608		page = radix_tree_deref_slot(slot);
1609		if (unlikely(!page))
1610			continue;
1611
1612		if (radix_tree_exception(page)) {
1613			if (radix_tree_deref_retry(page)) {
1614				slot = radix_tree_iter_retry(&iter);
1615				continue;
1616			}
 
1617			/*
1618			 * A shadow entry of a recently evicted page.
1619			 *
1620			 * Those entries should never be tagged, but
1621			 * this tree walk is lockless and the tags are
1622			 * looked up in bulk, one radix tree node at a
1623			 * time, so there is a sizable window for page
1624			 * reclaim to evict a page we saw tagged.
1625			 *
1626			 * Skip over it.
1627			 */
1628			continue;
 
1629		}
1630
1631		head = compound_head(page);
1632		if (!page_cache_get_speculative(head))
1633			goto repeat;
1634
1635		/* The page was split under us? */
1636		if (compound_head(page) != head) {
1637			put_page(head);
1638			goto repeat;
1639		}
1640
1641		/* Has the page moved? */
1642		if (unlikely(page != *slot)) {
1643			put_page(head);
1644			goto repeat;
1645		}
1646
1647		pages[ret] = page;
1648		if (++ret == nr_pages)
1649			break;
1650	}
1651
1652	rcu_read_unlock();
1653
1654	if (ret)
1655		*index = pages[ret - 1]->index + 1;
1656
1657	return ret;
1658}
1659EXPORT_SYMBOL(find_get_pages_tag);
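/*
 * Example (illustrative sketch of a writeback-style scan over dirty pages;
 * the batch size of 16 is arbitrary):
 *
 *	pgoff_t index = 0;
 *	struct page *pages[16];
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
 *					16, pages)) != 0) {
 *		for (i = 0; i < nr; i++) {
 *			... queue pages[i] for writeback ...
 *			put_page(pages[i]);
 *		}
 *	}
 */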
1660
1661/**
1662 * find_get_entries_tag - find and return entries that match @tag
1663 * @mapping:	the address_space to search
1664 * @start:	the starting page cache index
1665 * @tag:	the tag index
1666 * @nr_entries:	the maximum number of entries
1667 * @entries:	where the resulting entries are placed
1668 * @indices:	the cache indices corresponding to the entries in @entries
1669 *
1670 * Like find_get_entries, except we only return entries which are tagged with
1671 * @tag.
1672 */
1673unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
1674			int tag, unsigned int nr_entries,
1675			struct page **entries, pgoff_t *indices)
1676{
1677	void **slot;
1678	unsigned int ret = 0;
1679	struct radix_tree_iter iter;
1680
1681	if (!nr_entries)
1682		return 0;
1683
1684	rcu_read_lock();
1685	radix_tree_for_each_tagged(slot, &mapping->page_tree,
1686				   &iter, start, tag) {
1687		struct page *head, *page;
1688repeat:
1689		page = radix_tree_deref_slot(slot);
1690		if (unlikely(!page))
1691			continue;
1692		if (radix_tree_exception(page)) {
1693			if (radix_tree_deref_retry(page)) {
1694				slot = radix_tree_iter_retry(&iter);
1695				continue;
1696			}
1697
1698			/*
1699			 * A shadow entry of a recently evicted page, a swap
1700			 * entry from shmem/tmpfs or a DAX entry.  Return it
1701			 * without attempting to raise page count.
1702			 */
1703			goto export;
1704		}
 
1705
1706		head = compound_head(page);
1707		if (!page_cache_get_speculative(head))
1708			goto repeat;
1709
1710		/* The page was split under us? */
1711		if (compound_head(page) != head) {
1712			put_page(head);
1713			goto repeat;
1714		}
1715
1716		/* Has the page moved? */
1717		if (unlikely(page != *slot)) {
1718			put_page(head);
1719			goto repeat;
1720		}
1721export:
1722		indices[ret] = iter.index;
1723		entries[ret] = page;
1724		if (++ret == nr_entries)
1725			break;
1726	}
1727	rcu_read_unlock();
1728	return ret;
1729}
1730EXPORT_SYMBOL(find_get_entries_tag);
1731
1732/*
1733 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1734 * a _large_ part of the i/o request. Imagine the worst scenario:
1735 *
1736 *      ---R__________________________________________B__________
1737 *         ^ reading here                             ^ bad block(assume 4k)
1738 *
1739 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1740 * => failing the whole request => read(R) => read(R+1) =>
1741 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1742 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1743 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1744 *
1745 * It is going insane. Fix it by quickly scaling down the readahead size.
1746 */
1747static void shrink_readahead_size_eio(struct file *filp,
1748					struct file_ra_state *ra)
1749{
1750	ra->ra_pages /= 4;
 
 
1751}
1752
1753/**
1754 * do_generic_file_read - generic file read routine
1755 * @filp:	the file to read
1756 * @ppos:	current file position
1757 * @iter:	data destination
1758 * @written:	already copied
1759 *
1760 * This is a generic file read routine, and uses the
1761 * mapping->a_ops->readpage() function for the actual low-level stuff.
1762 *
1763 * This is really ugly. But the goto's actually try to clarify some
1764 * of the logic when it comes to error handling etc.
1765 */
1766static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1767		struct iov_iter *iter, ssize_t written)
1768{
 
 
1769	struct address_space *mapping = filp->f_mapping;
1770	struct inode *inode = mapping->host;
1771	struct file_ra_state *ra = &filp->f_ra;
1772	pgoff_t index;
1773	pgoff_t last_index;
1774	pgoff_t prev_index;
1775	unsigned long offset;      /* offset into pagecache page */
1776	unsigned int prev_offset;
1777	int error = 0;
1778
1779	if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
 
 
1780		return 0;
 
1781	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
1782
1783	index = *ppos >> PAGE_SHIFT;
1784	prev_index = ra->prev_pos >> PAGE_SHIFT;
1785	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
1786	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
1787	offset = *ppos & ~PAGE_MASK;
 
 
1788
1789	for (;;) {
1790		struct page *page;
1791		pgoff_t end_index;
1792		loff_t isize;
1793		unsigned long nr, ret;
1794
1795		cond_resched();
1796find_page:
1797		if (fatal_signal_pending(current)) {
1798			error = -EINTR;
1799			goto out;
1800		}
1801
1802		page = find_get_page(mapping, index);
1803		if (!page) {
1804			page_cache_sync_readahead(mapping,
1805					ra, filp,
1806					index, last_index - index);
1807			page = find_get_page(mapping, index);
1808			if (unlikely(page == NULL))
1809				goto no_cached_page;
1810		}
1811		if (PageReadahead(page)) {
1812			page_cache_async_readahead(mapping,
1813					ra, filp, page,
1814					index, last_index - index);
1815		}
1816		if (!PageUptodate(page)) {
1817			/*
1818			 * See comment in do_read_cache_page on why
1819			 * wait_on_page_locked is used to avoid unnecessarily
1820			 * serialisations and why it's safe.
1821			 */
1822			error = wait_on_page_locked_killable(page);
1823			if (unlikely(error))
1824				goto readpage_error;
1825			if (PageUptodate(page))
1826				goto page_ok;
1827
1828			if (inode->i_blkbits == PAGE_SHIFT ||
1829					!mapping->a_ops->is_partially_uptodate)
1830				goto page_not_up_to_date;
1831			/* pipes can't handle partially uptodate pages */
1832			if (unlikely(iter->type & ITER_PIPE))
1833				goto page_not_up_to_date;
1834			if (!trylock_page(page))
1835				goto page_not_up_to_date;
1836			/* Did it get truncated before we got the lock? */
1837			if (!page->mapping)
1838				goto page_not_up_to_date_locked;
1839			if (!mapping->a_ops->is_partially_uptodate(page,
1840							offset, iter->count))
1841				goto page_not_up_to_date_locked;
1842			unlock_page(page);
1843		}
1844page_ok:
1845		/*
1846		 * i_size must be checked after we know the page is Uptodate.
1847		 *
1848		 * Checking i_size after the check allows us to calculate
1849		 * the correct value for "nr", which means the zero-filled
1850		 * part of the page is not copied back to userspace (unless
1851		 * another truncate extends the file - this is desired though).
1852		 */
1853
1854		isize = i_size_read(inode);
1855		end_index = (isize - 1) >> PAGE_SHIFT;
1856		if (unlikely(!isize || index > end_index)) {
1857			put_page(page);
1858			goto out;
1859		}
1860
1861		/* nr is the maximum number of bytes to copy from this page */
1862		nr = PAGE_SIZE;
1863		if (index == end_index) {
1864			nr = ((isize - 1) & ~PAGE_MASK) + 1;
1865			if (nr <= offset) {
1866				put_page(page);
1867				goto out;
1868			}
1869		}
1870		nr = nr - offset;
1871
1872		/* If users can be writing to this page using arbitrary
1873		 * virtual addresses, take care about potential aliasing
1874		 * before reading the page on the kernel side.
1875		 */
1876		if (mapping_writably_mapped(mapping))
1877			flush_dcache_page(page);
1878
1879		/*
1880		 * When a sequential read accesses a page several times,
1881		 * only mark it as accessed the first time.
1882		 */
1883		if (prev_index != index || offset != prev_offset)
1884			mark_page_accessed(page);
1885		prev_index = index;
1886
1887		/*
1888		 * Ok, we have the page, and it's up-to-date, so
1889		 * now we can copy it to user space...
1890		 */
1891
1892		ret = copy_page_to_iter(page, offset, nr, iter);
1893		offset += ret;
1894		index += offset >> PAGE_SHIFT;
1895		offset &= ~PAGE_MASK;
1896		prev_offset = offset;
1897
1898		put_page(page);
1899		written += ret;
1900		if (!iov_iter_count(iter))
1901			goto out;
1902		if (ret < nr) {
1903			error = -EFAULT;
1904			goto out;
1905		}
1906		continue;
1907
1908page_not_up_to_date:
1909		/* Get exclusive access to the page ... */
1910		error = lock_page_killable(page);
1911		if (unlikely(error))
1912			goto readpage_error;
1913
1914page_not_up_to_date_locked:
1915		/* Did it get truncated before we got the lock? */
1916		if (!page->mapping) {
1917			unlock_page(page);
1918			put_page(page);
1919			continue;
1920		}
1921
1922		/* Did somebody else fill it already? */
1923		if (PageUptodate(page)) {
1924			unlock_page(page);
1925			goto page_ok;
1926		}
1927
1928readpage:
1929		/*
1930		 * A previous I/O error may have been due to temporary
1931		 * failures, e.g. multipath errors.
1932		 * PG_error will be set again if readpage fails.
1933		 */
1934		ClearPageError(page);
1935		/* Start the actual read. The read will unlock the page. */
1936		error = mapping->a_ops->readpage(filp, page);
1937
1938		if (unlikely(error)) {
1939			if (error == AOP_TRUNCATED_PAGE) {
1940				put_page(page);
1941				error = 0;
1942				goto find_page;
1943			}
1944			goto readpage_error;
1945		}
1946
1947		if (!PageUptodate(page)) {
1948			error = lock_page_killable(page);
1949			if (unlikely(error))
1950				goto readpage_error;
1951			if (!PageUptodate(page)) {
1952				if (page->mapping == NULL) {
1953					/*
1954					 * invalidate_mapping_pages got it
1955					 */
1956					unlock_page(page);
1957					put_page(page);
1958					goto find_page;
1959				}
1960				unlock_page(page);
1961				shrink_readahead_size_eio(filp, ra);
1962				error = -EIO;
1963				goto readpage_error;
1964			}
1965			unlock_page(page);
1966		}
1967
1968		goto page_ok;
 
 
1969
1970readpage_error:
1971		/* UHHUH! A synchronous read error occurred. Report it */
1972		put_page(page);
1973		goto out;
 
 
1974
1975no_cached_page:
1976		/*
1977		 * Ok, it wasn't cached, so we need to create a new
1978		 * page..
1979		 */
1980		page = page_cache_alloc_cold(mapping);
1981		if (!page) {
1982			error = -ENOMEM;
1983			goto out;
1984		}
1985		error = add_to_page_cache_lru(page, mapping, index,
1986				mapping_gfp_constraint(mapping, GFP_KERNEL));
1987		if (error) {
1988			put_page(page);
1989			if (error == -EEXIST) {
1990				error = 0;
1991				goto find_page;
1992			}
1993			goto out;
1994		}
1995		goto readpage;
1996	}
1997
1998out:
1999	ra->prev_pos = prev_index;
2000	ra->prev_pos <<= PAGE_SHIFT;
2001	ra->prev_pos |= prev_offset;
2002
2003	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
2004	file_accessed(filp);
2005	return written ? written : error;
2006}
 
2007
2008/**
2009 * generic_file_read_iter - generic filesystem read routine
2010 * @iocb:	kernel I/O control block
2011 * @iter:	destination for the data read
2012 *
2013 * This is the "read_iter()" routine for all filesystems
2014 * that can use the page cache directly.
2015 */
2016ssize_t
2017generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2018{
2019	struct file *file = iocb->ki_filp;
2020	ssize_t retval = 0;
2021	size_t count = iov_iter_count(iter);
2022
2023	if (!count)
2024		goto out; /* skip atime */
2025
2026	if (iocb->ki_flags & IOCB_DIRECT) {
 
2027		struct address_space *mapping = file->f_mapping;
2028		struct inode *inode = mapping->host;
2029		struct iov_iter data = *iter;
2030		loff_t size;
2031
2032		size = i_size_read(inode);
2033		retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
2034					iocb->ki_pos + count - 1);
2035		if (retval < 0)
2036			goto out;
2037
2038		file_accessed(file);
2039
2040		retval = mapping->a_ops->direct_IO(iocb, &data);
2041		if (retval >= 0) {
2042			iocb->ki_pos += retval;
2043			iov_iter_advance(iter, retval);
2044		}
 
 
2045
2046		/*
2047		 * Btrfs can have a short DIO read if we encounter
2048		 * compressed extents, so if there was an error, or if
2049		 * we've already read everything we wanted to, or if
2050		 * there was a short read because we hit EOF, go ahead
2051		 * and return.  Otherwise fallthrough to buffered io for
2052		 * the rest of the read.  Buffered reads will not work for
2053		 * DAX files, so don't bother trying.
2054		 */
2055		if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
2056		    IS_DAX(inode))
2057			goto out;
 
2058	}
2059
2060	retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
2061out:
2062	return retval;
2063}
2064EXPORT_SYMBOL(generic_file_read_iter);
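/*
 * Example (illustrative sketch; "example_file_ops" is a hypothetical
 * filesystem's table): most page-cache based filesystems simply point their
 * read path at this helper:
 *
 *	const struct file_operations example_file_ops = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */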
2065
2066#ifdef CONFIG_MMU
2067/**
2068 * page_cache_read - adds requested page to the page cache if not already there
2069 * @file:	file to read
2070 * @offset:	page index
2071 * @gfp_mask:	memory allocation flags
2072 *
2073 * This adds the requested page to the page cache if it isn't already there,
2074 * and schedules an I/O to read in its contents from disk.
2075 */
2076static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
 
2077{
2078	struct address_space *mapping = file->f_mapping;
2079	struct page *page;
2080	int ret;
2081
2082	do {
2083		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
2084		if (!page)
2085			return -ENOMEM;
2086
2087		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
2088		if (ret == 0)
2089			ret = mapping->a_ops->readpage(file, page);
2090		else if (ret == -EEXIST)
2091			ret = 0; /* losing race to add is OK */
2092
2093		put_page(page);
 
2094
2095	} while (ret == AOP_TRUNCATED_PAGE);
2096
2097	return ret;
2098}
2099
 
2100#define MMAP_LOTSAMISS  (100)
2101
2102/*
2103 * Synchronous readahead happens when we don't even find
2104 * a page in the page cache at all.
2105 */
2106static void do_sync_mmap_readahead(struct vm_area_struct *vma,
2107				   struct file_ra_state *ra,
2108				   struct file *file,
2109				   pgoff_t offset)
2110{
 
 
2111	struct address_space *mapping = file->f_mapping;
2112
2113	/* If we don't want any read-ahead, don't bother */
2114	if (vma->vm_flags & VM_RAND_READ)
2115		return;
2116	if (!ra->ra_pages)
2117		return;
2118
2119	if (vma->vm_flags & VM_SEQ_READ) {
2120		page_cache_sync_readahead(mapping, ra, file, offset,
2121					  ra->ra_pages);
2122		return;
2123	}
2124
2125	/* Avoid banging the cache line if not needed */
2126	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
2127		ra->mmap_miss++;
 
2128
2129	/*
2130	 * Do we miss much more than hit in this file? If so,
2131	 * stop bothering with read-ahead. It will only hurt.
2132	 */
2133	if (ra->mmap_miss > MMAP_LOTSAMISS)
2134		return;
2135
2136	/*
2137	 * mmap read-around
2138	 */
2139	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
 
2140	ra->size = ra->ra_pages;
2141	ra->async_size = ra->ra_pages / 4;
2142	ra_submit(ra, mapping, file);
 
 
2143}
2144
2145/*
2146 * Asynchronous readahead happens when we find the page and PG_readahead,
2147 * so we want to possibly extend the readahead further.
 
2148 */
2149static void do_async_mmap_readahead(struct vm_area_struct *vma,
2150				    struct file_ra_state *ra,
2151				    struct file *file,
2152				    struct page *page,
2153				    pgoff_t offset)
2154{
2155	struct address_space *mapping = file->f_mapping;
2156
2157	/* If we don't want any read-ahead, don't bother */
2158	if (vma->vm_flags & VM_RAND_READ)
2159		return;
2160	if (ra->mmap_miss > 0)
2161		ra->mmap_miss--;
2162	if (PageReadahead(page))
2163		page_cache_async_readahead(mapping, ra, file,
2164					   page, offset, ra->ra_pages);
2165}
2166
2167/**
2168 * filemap_fault - read in file data for page fault handling
2169 * @vma:	vma in which the fault was taken
2170 * @vmf:	struct vm_fault containing details of the fault
2171 *
2172 * filemap_fault() is invoked via the vma operations vector for a
2173 * mapped memory region to read in file data during a page fault.
2174 *
2175 * The goto's are kind of ugly, but this streamlines the normal case of having
2176 * it in the page cache, and handles the special cases reasonably without
2177 * having a lot of duplicated code.
2178 *
2179 * vma->vm_mm->mmap_sem must be held on entry.
2180 *
2181 * If our return value has VM_FAULT_RETRY set, it's because
2182 * lock_page_or_retry() returned 0.
2183 * The mmap_sem has usually been released in this case.
2184 * See __lock_page_or_retry() for the exception.
2185 *
2186 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
2187 * has not been released.
2188 *
2189 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 
 
2190 */
2191int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2192{
2193	int error;
2194	struct file *file = vma->vm_file;
 
2195	struct address_space *mapping = file->f_mapping;
2196	struct file_ra_state *ra = &file->f_ra;
2197	struct inode *inode = mapping->host;
2198	pgoff_t offset = vmf->pgoff;
2199	struct page *page;
2200	loff_t size;
2201	int ret = 0;
2202
2203	size = round_up(i_size_read(inode), PAGE_SIZE);
2204	if (offset >= size >> PAGE_SHIFT)
2205		return VM_FAULT_SIGBUS;
2206
2207	/*
2208	 * Do we have something in the page cache already?
2209	 */
2210	page = find_get_page(mapping, offset);
2211	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
2212		/*
2213		 * We found the page, so try async readahead before
2214		 * waiting for the lock.
2215		 */
2216		do_async_mmap_readahead(vma, ra, file, page, offset);
2217	} else if (!page) {
2218		/* No page in the page cache at all */
2219		do_sync_mmap_readahead(vma, ra, file, offset);
2220		count_vm_event(PGMAJFAULT);
2221		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
2222		ret = VM_FAULT_MAJOR;
 
2223retry_find:
2224		page = find_get_page(mapping, offset);
2225		if (!page)
2226			goto no_cached_page;
2227	}
2228
2229	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
2230		put_page(page);
2231		return ret | VM_FAULT_RETRY;
2232	}
2233
2234	/* Did it get truncated? */
2235	if (unlikely(page->mapping != mapping)) {
2236		unlock_page(page);
2237		put_page(page);
2238		goto retry_find;
2239	}
2240	VM_BUG_ON_PAGE(page->index != offset, page);
2241
2242	/*
2243	 * We have a locked page in the page cache, now we need to check
2244	 * that it's up-to-date. If not, it is going to be due to an error.
 
2245	 */
2246	if (unlikely(!PageUptodate(page)))
2247		goto page_not_uptodate;
2248
2249	/*
2250	 * Found the page and have a reference on it.
2251	 * We must recheck i_size under page lock.
2252	 */
2253	size = round_up(i_size_read(inode), PAGE_SIZE);
2254	if (unlikely(offset >= size >> PAGE_SHIFT)) {
2255		unlock_page(page);
2256		put_page(page);
2257		return VM_FAULT_SIGBUS;
2258	}
2259
2260	vmf->page = page;
2261	return ret | VM_FAULT_LOCKED;
2262
2263no_cached_page:
2264	/*
2265	 * We're only likely to ever get here if MADV_RANDOM is in
2266	 * effect.
2267	 */
2268	error = page_cache_read(file, offset, vmf->gfp_mask);
2269
2270	/*
2271	 * The page we want has now been added to the page cache.
2272	 * In the unlikely event that someone removed it in the
2273	 * meantime, we'll just come back here and read it again.
2274	 */
2275	if (error >= 0)
2276		goto retry_find;
2277
2278	/*
2279	 * An error return from page_cache_read can result if the
2280	 * system is low on memory, or a problem occurs while trying
2281	 * to schedule I/O.
2282	 */
2283	if (error == -ENOMEM)
2284		return VM_FAULT_OOM;
2285	return VM_FAULT_SIGBUS;
2286
2287page_not_uptodate:
2288	/*
2289	 * Umm, take care of errors if the page isn't up-to-date.
2290	 * Try to re-read it _once_. We do this synchronously,
2291	 * because there really aren't any performance issues here
2292	 * and we need to check for errors.
2293	 */
2294	ClearPageError(page);
2295	error = mapping->a_ops->readpage(file, page);
2296	if (!error) {
2297		wait_on_page_locked(page);
2298		if (!PageUptodate(page))
2299			error = -EIO;
2300	}
2301	put_page(page);
2302
2303	if (!error || error == AOP_TRUNCATED_PAGE)
2304		goto retry_find;
 
2305
2306	/* Things didn't work out. Return zero to tell the mm layer so. */
2307	shrink_readahead_size_eio(file, ra);
2308	return VM_FAULT_SIGBUS;
2309}
2310EXPORT_SYMBOL(filemap_fault);
2311
2312void filemap_map_pages(struct vm_fault *vmf,
2313		pgoff_t start_pgoff, pgoff_t end_pgoff)
2314{
2315	struct radix_tree_iter iter;
2316	void **slot;
2317	struct file *file = vmf->vma->vm_file;
2318	struct address_space *mapping = file->f_mapping;
2319	pgoff_t last_pgoff = start_pgoff;
2320	loff_t size;
2321	struct page *head, *page;
 
2322
2323	rcu_read_lock();
2324	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
2325			start_pgoff) {
2326		if (iter.index > end_pgoff)
2327			break;
2328repeat:
2329		page = radix_tree_deref_slot(slot);
2330		if (unlikely(!page))
2331			goto next;
2332		if (radix_tree_exception(page)) {
2333			if (radix_tree_deref_retry(page)) {
2334				slot = radix_tree_iter_retry(&iter);
2335				continue;
2336			}
2337			goto next;
2338		}
 
2339
2340		head = compound_head(page);
2341		if (!page_cache_get_speculative(head))
2342			goto repeat;
2343
2344		/* The page was split under us? */
2345		if (compound_head(page) != head) {
2346			put_page(head);
2347			goto repeat;
2348		}
2349
2350		/* Has the page moved? */
2351		if (unlikely(page != *slot)) {
2352			put_page(head);
2353			goto repeat;
2354		}
2355
2356		if (!PageUptodate(page) ||
2357				PageReadahead(page) ||
2358				PageHWPoison(page))
2359			goto skip;
2360		if (!trylock_page(page))
2361			goto skip;
2362
2363		if (page->mapping != mapping || !PageUptodate(page))
2364			goto unlock;
 
2365
2366		size = round_up(i_size_read(mapping->host), PAGE_SIZE);
2367		if (page->index >= size >> PAGE_SHIFT)
2368			goto unlock;
2369
2370		if (file->f_ra.mmap_miss > 0)
2371			file->f_ra.mmap_miss--;
2372
2373		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
2374		if (vmf->pte)
2375			vmf->pte += iter.index - last_pgoff;
2376		last_pgoff = iter.index;
2377		if (alloc_set_pte(vmf, NULL, page))
2378			goto unlock;
2379		unlock_page(page);
2380		goto next;
2381unlock:
2382		unlock_page(page);
2383skip:
2384		put_page(page);
2385next:
2386		/* Huge page is mapped? No need to proceed. */
2387		if (pmd_trans_huge(*vmf->pmd))
2388			break;
2389		if (iter.index == end_pgoff)
2390			break;
2391	}
2392	rcu_read_unlock();
2393}
2394EXPORT_SYMBOL(filemap_map_pages);
2395
2396int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2397{
2398	struct page *page = vmf->page;
2399	struct inode *inode = file_inode(vma->vm_file);
2400	int ret = VM_FAULT_LOCKED;
2401
2402	sb_start_pagefault(inode->i_sb);
2403	file_update_time(vma->vm_file);
2404	lock_page(page);
2405	if (page->mapping != inode->i_mapping) {
2406		unlock_page(page);
2407		ret = VM_FAULT_NOPAGE;
2408		goto out;
2409	}
2410	/*
2411	 * We mark the page dirty already here so that when freeze is in
2412	 * progress, we are guaranteed that writeback during freezing will
2413	 * see the dirty page and writeprotect it again.
2414	 */
2415	set_page_dirty(page);
2416	wait_for_stable_page(page);
2417out:
2418	sb_end_pagefault(inode->i_sb);
2419	return ret;
2420}
2421EXPORT_SYMBOL(filemap_page_mkwrite);
2422
2423const struct vm_operations_struct generic_file_vm_ops = {
2424	.fault		= filemap_fault,
2425	.map_pages	= filemap_map_pages,
2426	.page_mkwrite	= filemap_page_mkwrite,
2427};
2428
2429/* This is used for a general mmap of a disk file */
2430
2431int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2432{
2433	struct address_space *mapping = file->f_mapping;
2434
2435	if (!mapping->a_ops->readpage)
2436		return -ENOEXEC;
2437	file_accessed(file);
2438	vma->vm_ops = &generic_file_vm_ops;
2439	return 0;
2440}
2441
2442/*
2443 * This is for filesystems which do not implement ->writepage.
2444 */
2445int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2446{
2447	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2448		return -EINVAL;
2449	return generic_file_mmap(file, vma);
2450}
2451#else
2452int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2453{
2454	return -ENOSYS;
2455}
2456int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
2457{
2458	return -ENOSYS;
2459}
2460#endif /* CONFIG_MMU */
2461
 
2462EXPORT_SYMBOL(generic_file_mmap);
2463EXPORT_SYMBOL(generic_file_readonly_mmap);
2464
2465static struct page *wait_on_page_read(struct page *page)
 
2466{
2467	if (!IS_ERR(page)) {
2468		wait_on_page_locked(page);
2469		if (!PageUptodate(page)) {
2470			put_page(page);
2471			page = ERR_PTR(-EIO);
2472		}
2473	}
2474	return page;
2475}
2476
2477static struct page *do_read_cache_page(struct address_space *mapping,
2478				pgoff_t index,
2479				int (*filler)(void *, struct page *),
2480				void *data,
2481				gfp_t gfp)
2482{
2483	struct page *page;
2484	int err;
2485repeat:
2486	page = find_get_page(mapping, index);
2487	if (!page) {
2488		page = __page_cache_alloc(gfp | __GFP_COLD);
2489		if (!page)
2490			return ERR_PTR(-ENOMEM);
2491		err = add_to_page_cache_lru(page, mapping, index, gfp);
2492		if (unlikely(err)) {
2493			put_page(page);
2494			if (err == -EEXIST)
2495				goto repeat;
2496			/* Presumably ENOMEM for radix tree node */
2497			return ERR_PTR(err);
2498		}
2499
2500filler:
2501		err = filler(data, page);
2502		if (err < 0) {
2503			put_page(page);
2504			return ERR_PTR(err);
2505		}
2506
2507		page = wait_on_page_read(page);
2508		if (IS_ERR(page))
2509			return page;
2510		goto out;
2511	}
2512	if (PageUptodate(page))
2513		goto out;
2514
2515	/*
2516	 * Page is not up to date and may be locked due to one of the following
2517	 * case a: Page is being filled and the page lock is held
2518	 * case b: Read/write error clearing the page uptodate status
2519	 * case c: Truncation in progress (page locked)
2520	 * case d: Reclaim in progress
2521	 *
2522	 * Case a, the page will be up to date when the page is unlocked.
2523	 *    There is no need to serialise on the page lock here as the page
2524	 *    is pinned so the lock gives no additional protection. Even if
2525	 *    the page is truncated, the data is still valid if PageUptodate as
2526	 *    it's a read vs truncate race.
2527	 * Case b, the page will not be up to date
2528	 * Case c, the page may be truncated but in itself, the data may still
2529	 *    be valid after IO completes as it's a read vs truncate race. The
2530	 *    operation must restart if the page is not uptodate on unlock but
2531	 *    otherwise serialising on page lock to stabilise the mapping gives
2532	 *    no additional guarantees to the caller as the page lock is
2533	 *    released before return.
2534	 * Case d, similar to truncation. If reclaim holds the page lock, it
2535	 *    will be a race with remove_mapping that determines if the mapping
2536	 *    is valid on unlock but otherwise the data is valid and there is
2537	 *    no need to serialise with page lock.
2538	 *
2539	 * As the page lock gives no additional guarantee, we optimistically
2540	 * wait on the page to be unlocked and check if it's up to date and
2541	 * use the page if it is. Otherwise, the page lock is required to
2542	 * distinguish between the different cases. The motivation is that we
2543	 * avoid spurious serialisations and wakeups when multiple processes
2544	 * wait on the same page for IO to complete.
2545	 */
2546	wait_on_page_locked(page);
2547	if (PageUptodate(page))
2548		goto out;
2549
2550	/* Distinguish between all the cases under the safety of the lock */
2551	lock_page(page);
2552
2553	/* Case c or d, restart the operation */
2554	if (!page->mapping) {
2555		unlock_page(page);
2556		put_page(page);
2557		goto repeat;
2558	}
2559
2560	/* Someone else locked and filled the page in a very small window */
2561	if (PageUptodate(page)) {
2562		unlock_page(page);
2563		goto out;
2564	}
2565	goto filler;
2566
2567out:
2568	mark_page_accessed(page);
2569	return page;
2570}
 
2571
2572/**
2573 * read_cache_page - read into page cache, fill it if needed
2574 * @mapping:	the page's address_space
2575 * @index:	the page index
2576 * @filler:	function to perform the read
2577 * @data:	first arg to filler(data, page) function, often left as NULL
2578 *
2579 * Read into the page cache. If a page already exists, and PageUptodate() is
2580 * not set, try to fill the page and wait for it to become unlocked.
2581 *
2582 * If the page does not get brought uptodate, return -EIO.
2583 */
2584struct page *read_cache_page(struct address_space *mapping,
2585				pgoff_t index,
2586				int (*filler)(void *, struct page *),
2587				void *data)
2588{
2589	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
 
2590}
2591EXPORT_SYMBOL(read_cache_page);
2592
2593/**
2594 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2595 * @mapping:	the page's address_space
2596 * @index:	the page index
2597 * @gfp:	the page allocator flags to use if allocating
2598 *
2599 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
2600 * any new page allocations done using the specified allocation flags.
2601 *
2602 * If the page does not get brought uptodate, return -EIO.
2603 */
2604struct page *read_cache_page_gfp(struct address_space *mapping,
2605				pgoff_t index,
2606				gfp_t gfp)
2607{
2608	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
2609
2610	return do_read_cache_page(mapping, index, filler, NULL, gfp);
2611}
2612EXPORT_SYMBOL(read_cache_page_gfp);
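/*
 * Example (illustrative sketch; "example_filler" is a hypothetical
 * int (*)(void *, struct page *) callback): read and validate a metadata
 * page, failing cleanly if it never becomes uptodate:
 *
 *	struct page *page;
 *
 *	page = read_cache_page(mapping, 0, example_filler, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... kmap(page), parse the contents, kunmap(page) ...
 *	put_page(page);
 */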

/*
 * Performs necessary checks before doing a write
 *
 * May adjust the write position or the number of bytes to write.
 * Returns a negative error code that the caller should return, 0 if there
 * is nothing to write, or the (possibly truncated) number of bytes that
 * may be written.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule: without O_LARGEFILE, a write may not reach past
	 * MAX_NON_LFS (2^31 - 1 bytes).
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus's frestrict idea will clean these up nicely.
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);
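
/*
 * Editor's worked example of the LFS rule above: with O_LARGEFILE clear,
 * MAX_NON_LFS is 2^31 - 1.  A 4096-byte write at pos 0x7ffff000 passes the
 * pos check (pos < MAX_NON_LFS) but would cross the limit, so
 * iov_iter_truncate() shortens it to 0xfff (4095) bytes and the caller
 * sees a short write instead of -EFBIG.
 */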

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
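
/*
 * Editor's illustration (hypothetical): the begin/copy/end cycle these two
 * helpers expose, here for a kernel buffer that does not cross a page
 * boundary.  generic_perform_write() below is the real in-tree user.
 */
#if 0
static int myfs_write_kernel_buf(struct file *file, loff_t pos,
				 const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata, *kaddr;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len,
				    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret)
		return ret;

	kaddr = kmap_atomic(page);
	memcpy(kaddr + (pos & (PAGE_SIZE - 1)), buf, len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);

	/* Returns the number of bytes committed, or a negative error. */
	return pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
}
#endif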

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file	*file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode	*inode = mapping->host;
	loff_t		pos = iocb->ki_pos;
	ssize_t		written;
	size_t		write_len;
	pgoff_t		end;
	struct iov_iter data;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
		/*
		 * If a page cannot be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	data = *from;
	written = mapping->a_ops->direct_IO(iocb, &data);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		iov_iter_advance(from, written);
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
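
/*
 * Editor's illustration (hypothetical): a trivial ->write_begin built on
 * grab_cache_page_write_begin(), much like simple_write_begin() in libfs.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct page *page;

	page = grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	return 0;
}
#endif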

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode	*inode = mapping->host;
	ssize_t		written = 0;
	ssize_t		err;
	ssize_t		status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes.  Some filesystems do this for writes to
		 * holes, for example.  For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file for O_SYNC writes and
 * acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
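
/*
 * Editor's illustration (hypothetical): most filesystems simply wire the
 * generic helpers into their file_operations, ramfs-style.
 */
#if 0
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
};
#endif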

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to release any fs-private data held against
 * the page (presumably at page->private).  If the release was successful,
 * return `1'.  Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
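
/*
 * Editor's illustration (hypothetical): a ->releasepage that refuses to let
 * the VM free a page while fs-private state is still attached.  Real
 * implementations also detach and free that state before returning 1.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PagePrivate(page))		/* metadata still in use */
		return 0;
	return 1;			/* nothing attached; safe to free */
}
#endif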