v3.1
   1/*
   2 *	linux/mm/filemap.c
   3 *
   4 * Copyright (C) 1994-1999  Linus Torvalds
   5 */
   6
   7/*
   8 * This file handles the generic file mmap semantics used by
   9 * most "normal" filesystems (but you don't /have/ to use this:
  10 * the NFS filesystem used to do this differently, for example)
  11 */
  12#include <linux/module.h>
  13#include <linux/compiler.h>
  14#include <linux/fs.h>
  15#include <linux/uaccess.h>
  16#include <linux/aio.h>
  17#include <linux/capability.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/gfp.h>
  20#include <linux/mm.h>
  21#include <linux/swap.h>
  22#include <linux/mman.h>
  23#include <linux/pagemap.h>
  24#include <linux/file.h>
  25#include <linux/uio.h>
  26#include <linux/hash.h>
  27#include <linux/writeback.h>
  28#include <linux/backing-dev.h>
  29#include <linux/pagevec.h>
  30#include <linux/blkdev.h>
  31#include <linux/security.h>
  32#include <linux/syscalls.h>
  33#include <linux/cpuset.h>
  34#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
  35#include <linux/memcontrol.h>
  36#include <linux/cleancache.h>
  37#include "internal.h"
  38
  39/*
  40 * FIXME: remove all knowledge of the buffer layer from the core VM
  41 */
  42#include <linux/buffer_head.h> /* for try_to_free_buffers */
  43
  44#include <asm/mman.h>
  45
  46/*
  47 * Shared mappings implemented 30.11.1994. It's not fully working yet,
  48 * though.
  49 *
  50 * Shared mappings now work. 15.8.1995  Bruno.
  51 *
  52 * finished 'unifying' the page and buffer cache and SMP-threaded the
  53 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  54 *
  55 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  56 */
  57
  58/*
  59 * Lock ordering:
  60 *
  61 *  ->i_mmap_mutex		(truncate_pagecache)
  62 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  63 *      ->swap_lock		(exclusive_swap_page, others)
  64 *        ->mapping->tree_lock
  65 *
  66 *  ->i_mutex
  67 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
  68 *
  69 *  ->mmap_sem
  70 *    ->i_mmap_mutex
  71 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  72 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  73 *
  74 *  ->mmap_sem
  75 *    ->lock_page		(access_process_vm)
  76 *
  77 *  ->i_mutex			(generic_file_buffered_write)
  78 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
  79 *
  80 *  bdi->wb.list_lock
  81 *    sb_lock			(fs/fs-writeback.c)
  82 *    ->mapping->tree_lock	(__sync_single_inode)
  83 *
  84 *  ->i_mmap_mutex
  85 *    ->anon_vma.lock		(vma_adjust)
  86 *
  87 *  ->anon_vma.lock
  88 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  89 *
  90 *  ->page_table_lock or pte_lock
  91 *    ->swap_lock		(try_to_unmap_one)
  92 *    ->private_lock		(try_to_unmap_one)
  93 *    ->tree_lock		(try_to_unmap_one)
  94 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
  95 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
  96 *    ->private_lock		(page_remove_rmap->set_page_dirty)
  97 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
  98 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
  99 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 100 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 101 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 102 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 103 *
 104 *  (code doesn't rely on that order, so you could switch it around)
 105 *  ->tasklist_lock             (memory_failure, collect_procs_ao)
 106 *    ->i_mmap_mutex
 107 */
 108
 109/*
 110 * Delete a page from the page cache and free it. Caller has to make
 111 * sure the page is locked and that nobody else uses it - or that usage
 112 * is safe.  The caller must hold the mapping's tree_lock.
 113 */
 114void __delete_from_page_cache(struct page *page)
 115{
 116	struct address_space *mapping = page->mapping;
 117
 118	/*
  119	 * If we're uptodate, flush out into the cleancache; otherwise
  120	 * invalidate any existing cleancache entries.  We can't leave
  121	 * stale data around in the cleancache once our page is gone.
 122	 */
 123	if (PageUptodate(page) && PageMappedToDisk(page))
 124		cleancache_put_page(page);
 125	else
 126		cleancache_flush_page(mapping, page);
 127
 128	radix_tree_delete(&mapping->page_tree, page->index);
 129	page->mapping = NULL;
 130	/* Leave page->index set: truncation lookup relies upon it */
 131	mapping->nrpages--;
 132	__dec_zone_page_state(page, NR_FILE_PAGES);
 133	if (PageSwapBacked(page))
 134		__dec_zone_page_state(page, NR_SHMEM);
 135	BUG_ON(page_mapped(page));
 136
 137	/*
 138	 * Some filesystems seem to re-dirty the page even after
 139	 * the VM has canceled the dirty bit (eg ext3 journaling).
 140	 *
 141	 * Fix it up by doing a final dirty accounting check after
 142	 * having removed the page entirely.
 143	 */
 144	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
 145		dec_zone_page_state(page, NR_FILE_DIRTY);
 146		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 147	}
 148}
 149
 150/**
 151 * delete_from_page_cache - delete page from page cache
 152 * @page: the page which the kernel is trying to remove from page cache
 153 *
 154 * This must be called only on pages that have been verified to be in the page
  155 * cache and locked.  It will never put the page into the free list; the
  156 * caller still holds a reference on the page.
 157 */
 158void delete_from_page_cache(struct page *page)
 159{
 160	struct address_space *mapping = page->mapping;
 161	void (*freepage)(struct page *);
 162
 163	BUG_ON(!PageLocked(page));
 164
 165	freepage = mapping->a_ops->freepage;
 166	spin_lock_irq(&mapping->tree_lock);
 167	__delete_from_page_cache(page);
 168	spin_unlock_irq(&mapping->tree_lock);
 169	mem_cgroup_uncharge_cache_page(page);
 170
 171	if (freepage)
 172		freepage(page);
 173	page_cache_release(page);
 174}
 175EXPORT_SYMBOL(delete_from_page_cache);
 176
 177static int sleep_on_page(void *word)
 178{
 179	io_schedule();
 180	return 0;
 181}
 182
 183static int sleep_on_page_killable(void *word)
 184{
 185	sleep_on_page(word);
 186	return fatal_signal_pending(current) ? -EINTR : 0;
 187}
 188
 189/**
 190 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 191 * @mapping:	address space structure to write
 192 * @start:	offset in bytes where the range starts
 193 * @end:	offset in bytes where the range ends (inclusive)
 194 * @sync_mode:	enable synchronous operation
 195 *
 196 * Start writeback against all of a mapping's dirty pages that lie
 197 * within the byte offsets <start, end> inclusive.
 198 *
 199 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 200 * opposed to a regular memory cleansing writeback.  The difference between
 201 * these two operations is that if a dirty page/buffer is encountered, it must
 202 * be waited upon, and not just skipped over.
 203 */
 204int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 205				loff_t end, int sync_mode)
 206{
 207	int ret;
 208	struct writeback_control wbc = {
 209		.sync_mode = sync_mode,
 210		.nr_to_write = LONG_MAX,
 211		.range_start = start,
 212		.range_end = end,
 213	};
 214
 215	if (!mapping_cap_writeback_dirty(mapping))
 216		return 0;
 217
 218	ret = do_writepages(mapping, &wbc);
 219	return ret;
 220}
 221
 222static inline int __filemap_fdatawrite(struct address_space *mapping,
 223	int sync_mode)
 224{
 225	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 226}
 227
 228int filemap_fdatawrite(struct address_space *mapping)
 229{
 230	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
 231}
 232EXPORT_SYMBOL(filemap_fdatawrite);
 233
 234int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 235				loff_t end)
 236{
 237	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 238}
 239EXPORT_SYMBOL(filemap_fdatawrite_range);
 240
 241/**
 242 * filemap_flush - mostly a non-blocking flush
 243 * @mapping:	target address_space
 244 *
 245 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 246 * purposes - I/O may not be started against all dirty pages.
 247 */
 248int filemap_flush(struct address_space *mapping)
 249{
 250	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 251}
 252EXPORT_SYMBOL(filemap_flush);
 253
 254/**
 255 * filemap_fdatawait_range - wait for writeback to complete
 256 * @mapping:		address space structure to wait for
 257 * @start_byte:		offset in bytes where the range starts
 258 * @end_byte:		offset in bytes where the range ends (inclusive)
 259 *
 260 * Walk the list of under-writeback pages of the given address space
 261 * in the given range and wait for all of them.
 262 */
 263int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 264			    loff_t end_byte)
 265{
 266	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
 267	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
 268	struct pagevec pvec;
 269	int nr_pages;
 270	int ret = 0;
 271
 272	if (end_byte < start_byte)
 273		return 0;
 274
 275	pagevec_init(&pvec, 0);
 276	while ((index <= end) &&
 277			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 278			PAGECACHE_TAG_WRITEBACK,
 279			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
 280		unsigned i;
 281
 282		for (i = 0; i < nr_pages; i++) {
 283			struct page *page = pvec.pages[i];
 284
 285			/* until radix tree lookup accepts end_index */
 286			if (page->index > end)
 287				continue;
 288
 289			wait_on_page_writeback(page);
 290			if (TestClearPageError(page))
 291				ret = -EIO;
 292		}
 293		pagevec_release(&pvec);
 294		cond_resched();
 295	}
 296
 297	/* Check for outstanding write errors */
 298	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 299		ret = -ENOSPC;
 300	if (test_and_clear_bit(AS_EIO, &mapping->flags))
 301		ret = -EIO;
 302
 303	return ret;
 304}
 305EXPORT_SYMBOL(filemap_fdatawait_range);
 306
 307/**
 308 * filemap_fdatawait - wait for all under-writeback pages to complete
 309 * @mapping: address space structure to wait for
 310 *
 311 * Walk the list of under-writeback pages of the given address space
 312 * and wait for all of them.
 313 */
 314int filemap_fdatawait(struct address_space *mapping)
 315{
 316	loff_t i_size = i_size_read(mapping->host);
 317
 318	if (i_size == 0)
 319		return 0;
 320
 321	return filemap_fdatawait_range(mapping, 0, i_size - 1);
 322}
 323EXPORT_SYMBOL(filemap_fdatawait);
 324
 325int filemap_write_and_wait(struct address_space *mapping)
 326{
 327	int err = 0;
 328
 329	if (mapping->nrpages) {
 330		err = filemap_fdatawrite(mapping);
 331		/*
  332		 * Even if the above returned an error, the pages may have been
  333		 * written partially (e.g. -ENOSPC), so we wait for them.
  334		 * But -EIO is a special case: it may indicate that the worst
  335		 * thing (e.g. a bug) happened, so we avoid waiting for it.
 336		 */
 337		if (err != -EIO) {
 338			int err2 = filemap_fdatawait(mapping);
 339			if (!err)
 340				err = err2;
 341		}
 342	}
 343	return err;
 344}
 345EXPORT_SYMBOL(filemap_write_and_wait);
 346
 347/**
 348 * filemap_write_and_wait_range - write out & wait on a file range
 349 * @mapping:	the address_space for the pages
 350 * @lstart:	offset in bytes where the range starts
 351 * @lend:	offset in bytes where the range ends (inclusive)
 352 *
 353 * Write out and wait upon file offsets lstart->lend, inclusive.
 354 *
 355 * Note that `lend' is inclusive (describes the last byte to be written) so
 356 * that this function can be used to write to the very end-of-file (end = -1).
 357 */
 358int filemap_write_and_wait_range(struct address_space *mapping,
 359				 loff_t lstart, loff_t lend)
 360{
 361	int err = 0;
 362
 363	if (mapping->nrpages) {
 364		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 365						 WB_SYNC_ALL);
 366		/* See comment of filemap_write_and_wait() */
 367		if (err != -EIO) {
 368			int err2 = filemap_fdatawait_range(mapping,
 369						lstart, lend);
 370			if (!err)
 371				err = err2;
 372		}
 373	}
 374	return err;
 375}
 376EXPORT_SYMBOL(filemap_write_and_wait_range);
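/*
 * Example (editor's sketch, not part of the original file): a caller that
 * wants the "flush a byte range and wait" behaviour simply hands the
 * inclusive range to filemap_write_and_wait_range().  The helper name and
 * its arguments below are hypothetical.
 */
#if 0	/* illustration only */
static int example_flush_range(struct file *file, loff_t pos, size_t len)
{
	struct address_space *mapping = file->f_mapping;

	/* write back dirty pages in [pos, pos + len - 1] and wait for them */
	return filemap_write_and_wait_range(mapping, pos, pos + len - 1);
}
#endif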
 377
 378/**
 379 * replace_page_cache_page - replace a pagecache page with a new one
 380 * @old:	page to be replaced
 381 * @new:	page to replace with
 382 * @gfp_mask:	allocation mode
 383 *
 384 * This function replaces a page in the pagecache with a new one.  On
 385 * success it acquires the pagecache reference for the new page and
 386 * drops it for the old page.  Both the old and new pages must be
 387 * locked.  This function does not add the new page to the LRU, the
 388 * caller must do that.
 389 *
 390 * The remove + add is atomic.  The only way this function can fail is
 391 * memory allocation failure.
 392 */
 393int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 394{
 395	int error;
 396	struct mem_cgroup *memcg = NULL;
 397
 398	VM_BUG_ON(!PageLocked(old));
 399	VM_BUG_ON(!PageLocked(new));
 400	VM_BUG_ON(new->mapping);
 401
 402	/*
 403	 * This is not page migration, but prepare_migration and
  404	 * end_migration do enough work for charge replacement.
 405	 *
 406	 * In the longer term we probably want a specialized function
 407	 * for moving the charge from old to new in a more efficient
 408	 * manner.
 409	 */
 410	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
 411	if (error)
 412		return error;
 413
 414	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 415	if (!error) {
 416		struct address_space *mapping = old->mapping;
 417		void (*freepage)(struct page *);
 418
 419		pgoff_t offset = old->index;
 420		freepage = mapping->a_ops->freepage;
 421
 422		page_cache_get(new);
 423		new->mapping = mapping;
 424		new->index = offset;
 425
 426		spin_lock_irq(&mapping->tree_lock);
 427		__delete_from_page_cache(old);
 428		error = radix_tree_insert(&mapping->page_tree, offset, new);
 429		BUG_ON(error);
 430		mapping->nrpages++;
 431		__inc_zone_page_state(new, NR_FILE_PAGES);
 432		if (PageSwapBacked(new))
 433			__inc_zone_page_state(new, NR_SHMEM);
  434		spin_unlock_irq(&mapping->tree_lock);
 435		radix_tree_preload_end();
 436		if (freepage)
 437			freepage(old);
 438		page_cache_release(old);
 439		mem_cgroup_end_migration(memcg, old, new, true);
 440	} else {
 441		mem_cgroup_end_migration(memcg, old, new, false);
 442	}
 443
 444	return error;
 445}
 446EXPORT_SYMBOL_GPL(replace_page_cache_page);
 447
 448/**
 449 * add_to_page_cache_locked - add a locked page to the pagecache
 450 * @page:	page to add
 451 * @mapping:	the page's address_space
 452 * @offset:	page index
 453 * @gfp_mask:	page allocation mode
 454 *
  455 * This function is used to add a page to the pagecache; the page must be locked.
 456 * This function does not add the page to the LRU.  The caller must do that.
 457 */
 458int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 459		pgoff_t offset, gfp_t gfp_mask)
 460{
 461	int error;
 462
 463	VM_BUG_ON(!PageLocked(page));
 464	VM_BUG_ON(PageSwapBacked(page));
 465
 466	error = mem_cgroup_cache_charge(page, current->mm,
 467					gfp_mask & GFP_RECLAIM_MASK);
 468	if (error)
 469		goto out;
 470
 471	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 472	if (error == 0) {
 473		page_cache_get(page);
 474		page->mapping = mapping;
 475		page->index = offset;
 476
 477		spin_lock_irq(&mapping->tree_lock);
 478		error = radix_tree_insert(&mapping->page_tree, offset, page);
 479		if (likely(!error)) {
 480			mapping->nrpages++;
 481			__inc_zone_page_state(page, NR_FILE_PAGES);
 482			spin_unlock_irq(&mapping->tree_lock);
 483		} else {
 484			page->mapping = NULL;
 485			/* Leave page->index set: truncation relies upon it */
 486			spin_unlock_irq(&mapping->tree_lock);
 487			mem_cgroup_uncharge_cache_page(page);
 488			page_cache_release(page);
 489		}
 490		radix_tree_preload_end();
 491	} else
 492		mem_cgroup_uncharge_cache_page(page);
 493out:
 494	return error;
 495}
 496EXPORT_SYMBOL(add_to_page_cache_locked);
 497
 498int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 499				pgoff_t offset, gfp_t gfp_mask)
 500{
 501	int ret;
 502
 503	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 504	if (ret == 0)
 505		lru_cache_add_file(page);
 506	return ret;
 507}
 508EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 509
 510#ifdef CONFIG_NUMA
 511struct page *__page_cache_alloc(gfp_t gfp)
 512{
 513	int n;
 514	struct page *page;
 515
 516	if (cpuset_do_page_mem_spread()) {
 517		get_mems_allowed();
 518		n = cpuset_mem_spread_node();
 519		page = alloc_pages_exact_node(n, gfp, 0);
  520		put_mems_allowed();
 521		return page;
 522	}
 523	return alloc_pages(gfp, 0);
 524}
 525EXPORT_SYMBOL(__page_cache_alloc);
 526#endif
 527
 528/*
 529 * In order to wait for pages to become available there must be
 530 * waitqueues associated with pages. By using a hash table of
 531 * waitqueues where the bucket discipline is to maintain all
 532 * waiters on the same queue and wake all when any of the pages
 533 * become available, and for the woken contexts to check to be
 534 * sure the appropriate page became available, this saves space
 535 * at a cost of "thundering herd" phenomena during rare hash
 536 * collisions.
 537 */
 538static wait_queue_head_t *page_waitqueue(struct page *page)
 539{
 540	const struct zone *zone = page_zone(page);
 541
 542	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 543}
 544
 545static inline void wake_up_page(struct page *page, int bit)
 546{
 547	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 548}
 549
 550void wait_on_page_bit(struct page *page, int bit_nr)
 551{
 552	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 553
 554	if (test_bit(bit_nr, &page->flags))
 555		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
 556							TASK_UNINTERRUPTIBLE);
 557}
 558EXPORT_SYMBOL(wait_on_page_bit);
 559
 560int wait_on_page_bit_killable(struct page *page, int bit_nr)
 561{
 562	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 563
 564	if (!test_bit(bit_nr, &page->flags))
 565		return 0;
 566
 567	return __wait_on_bit(page_waitqueue(page), &wait,
 568			     sleep_on_page_killable, TASK_KILLABLE);
 569}
 570
 571/**
 572 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 573 * @page: Page defining the wait queue of interest
 574 * @waiter: Waiter to add to the queue
 575 *
 576 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 577 */
 578void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 579{
 580	wait_queue_head_t *q = page_waitqueue(page);
 581	unsigned long flags;
 582
 583	spin_lock_irqsave(&q->lock, flags);
 584	__add_wait_queue(q, waiter);
 585	spin_unlock_irqrestore(&q->lock, flags);
 586}
 587EXPORT_SYMBOL_GPL(add_page_wait_queue);
 588
 589/**
 590 * unlock_page - unlock a locked page
 591 * @page: the page
 592 *
 593 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 594 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
  595 * mechanism between PageLocked pages and PageWriteback pages is shared.
 596 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 597 *
 598 * The mb is necessary to enforce ordering between the clear_bit and the read
 599 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 600 */
 601void unlock_page(struct page *page)
 602{
 603	VM_BUG_ON(!PageLocked(page));
 604	clear_bit_unlock(PG_locked, &page->flags);
 605	smp_mb__after_clear_bit();
 606	wake_up_page(page, PG_locked);
 607}
 608EXPORT_SYMBOL(unlock_page);
 609
 610/**
 611 * end_page_writeback - end writeback against a page
 612 * @page: the page
 613 */
 614void end_page_writeback(struct page *page)
 615{
 616	if (TestClearPageReclaim(page))
 617		rotate_reclaimable_page(page);
 618
 619	if (!test_clear_page_writeback(page))
 620		BUG();
 621
 622	smp_mb__after_clear_bit();
 623	wake_up_page(page, PG_writeback);
 624}
 625EXPORT_SYMBOL(end_page_writeback);
 626
 627/**
 628 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 629 * @page: the page to lock
 630 */
 631void __lock_page(struct page *page)
 632{
 633	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 634
 635	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
 636							TASK_UNINTERRUPTIBLE);
 637}
 638EXPORT_SYMBOL(__lock_page);
 639
 640int __lock_page_killable(struct page *page)
 641{
 642	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 643
 644	return __wait_on_bit_lock(page_waitqueue(page), &wait,
 645					sleep_on_page_killable, TASK_KILLABLE);
 646}
 647EXPORT_SYMBOL_GPL(__lock_page_killable);
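/*
 * For reference (editor's note, hedged): the lock_page()/lock_page_killable()
 * inlines in include/linux/pagemap.h sit on top of the two functions above.
 * They are roughly equivalent to the following sketch - take the fast path
 * with trylock_page() and only sleep when the lock bit is already set.
 */
#if 0	/* simplified sketch, not the authoritative definitions */
static inline void example_lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

static inline int example_lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
#endif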
 648
 649int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 650			 unsigned int flags)
 651{
 652	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 653		/*
 654		 * CAUTION! In this case, mmap_sem is not released
  655		 * even though we return 0.
 656		 */
 657		if (flags & FAULT_FLAG_RETRY_NOWAIT)
 658			return 0;
 659
 660		up_read(&mm->mmap_sem);
 661		if (flags & FAULT_FLAG_KILLABLE)
 662			wait_on_page_locked_killable(page);
 663		else
 664			wait_on_page_locked(page);
 665		return 0;
 666	} else {
 667		if (flags & FAULT_FLAG_KILLABLE) {
 668			int ret;
 669
 670			ret = __lock_page_killable(page);
 671			if (ret) {
 672				up_read(&mm->mmap_sem);
 673				return 0;
 674			}
 675		} else
 676			__lock_page(page);
 677		return 1;
 678	}
 679}
 680
 681/**
 682 * find_get_page - find and get a page reference
 683 * @mapping: the address_space to search
 684 * @offset: the page index
 685 *
 686 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 687 * If yes, increment its refcount and return it; if no, return NULL.
 688 */
 689struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
 690{
 691	void **pagep;
 692	struct page *page;
 693
 694	rcu_read_lock();
 695repeat:
 696	page = NULL;
 697	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
 698	if (pagep) {
 699		page = radix_tree_deref_slot(pagep);
 700		if (unlikely(!page))
 701			goto out;
 702		if (radix_tree_exception(page)) {
 703			if (radix_tree_deref_retry(page))
 704				goto repeat;
 705			/*
 706			 * Otherwise, shmem/tmpfs must be storing a swap entry
 707			 * here as an exceptional entry: so return it without
 708			 * attempting to raise page count.
 709			 */
 710			goto out;
 711		}
 712		if (!page_cache_get_speculative(page))
 713			goto repeat;
 714
 715		/*
 716		 * Has the page moved?
 717		 * This is part of the lockless pagecache protocol. See
 718		 * include/linux/pagemap.h for details.
 719		 */
 720		if (unlikely(page != *pagep)) {
 721			page_cache_release(page);
 722			goto repeat;
 723		}
 724	}
 725out:
 726	rcu_read_unlock();
 727
 728	return page;
 729}
 730EXPORT_SYMBOL(find_get_page);
 731
 732/**
 733 * find_lock_page - locate, pin and lock a pagecache page
 734 * @mapping: the address_space to search
 735 * @offset: the page index
 736 *
 737 * Locates the desired pagecache page, locks it, increments its reference
 738 * count and returns its address.
 739 *
  740 * Returns NULL if the page was not present. find_lock_page() may sleep.
 741 */
 742struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 743{
 744	struct page *page;
 745
 746repeat:
 747	page = find_get_page(mapping, offset);
 748	if (page && !radix_tree_exception(page)) {
 749		lock_page(page);
 750		/* Has the page been truncated? */
 751		if (unlikely(page->mapping != mapping)) {
 752			unlock_page(page);
 753			page_cache_release(page);
 754			goto repeat;
 755		}
 756		VM_BUG_ON(page->index != offset);
 757	}
 758	return page;
 759}
 760EXPORT_SYMBOL(find_lock_page);
 761
 762/**
 763 * find_or_create_page - locate or add a pagecache page
 764 * @mapping: the page's address_space
 765 * @index: the page's index into the mapping
 766 * @gfp_mask: page allocation mode
 767 *
 768 * Locates a page in the pagecache.  If the page is not present, a new page
 769 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 770 * LRU list.  The returned page is locked and has its reference count
 771 * incremented.
 772 *
  773 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 774 * allocation!
 775 *
  776 * find_or_create_page() returns the desired page's address, or NULL on
 777 * memory exhaustion.
 778 */
 779struct page *find_or_create_page(struct address_space *mapping,
 780		pgoff_t index, gfp_t gfp_mask)
 781{
 782	struct page *page;
 783	int err;
 784repeat:
 785	page = find_lock_page(mapping, index);
 786	if (!page) {
 787		page = __page_cache_alloc(gfp_mask);
 788		if (!page)
 789			return NULL;
 790		/*
 791		 * We want a regular kernel memory (not highmem or DMA etc)
 792		 * allocation for the radix tree nodes, but we need to honour
 793		 * the context-specific requirements the caller has asked for.
 794		 * GFP_RECLAIM_MASK collects those requirements.
 795		 */
 796		err = add_to_page_cache_lru(page, mapping, index,
 797			(gfp_mask & GFP_RECLAIM_MASK));
 798		if (unlikely(err)) {
 799			page_cache_release(page);
 800			page = NULL;
 801			if (err == -EEXIST)
 802				goto repeat;
 803		}
 804	}
 805	return page;
 806}
 807EXPORT_SYMBOL(find_or_create_page);
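/*
 * Example (editor's sketch): find_or_create_page() returns a locked page
 * holding an extra reference, so a successful lookup must be paired with
 * unlock_page() and page_cache_release().  The function below is a
 * hypothetical caller, shown only to illustrate the reference discipline.
 */
#if 0	/* illustration only */
static int example_touch_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* ... fill or modify the locked page here ... */

	unlock_page(page);
	page_cache_release(page);
	return 0;
}
#endif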
 808
 809/**
 810 * find_get_pages - gang pagecache lookup
 811 * @mapping:	The address_space to search
 812 * @start:	The starting page index
 813 * @nr_pages:	The maximum number of pages
 814 * @pages:	Where the resulting pages are placed
 815 *
 816 * find_get_pages() will search for and return a group of up to
 817 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 818 * find_get_pages() takes a reference against the returned pages.
 819 *
 820 * The search returns a group of mapping-contiguous pages with ascending
 821 * indexes.  There may be holes in the indices due to not-present pages.
 822 *
 823 * find_get_pages() returns the number of pages which were found.
 824 */
 825unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 826			    unsigned int nr_pages, struct page **pages)
 827{
 828	unsigned int i;
 829	unsigned int ret;
  830	unsigned int nr_found, nr_skip;
 831
 832	rcu_read_lock();
 833restart:
 834	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
 835				(void ***)pages, NULL, start, nr_pages);
 836	ret = 0;
 837	nr_skip = 0;
 838	for (i = 0; i < nr_found; i++) {
 839		struct page *page;
 840repeat:
 841		page = radix_tree_deref_slot((void **)pages[i]);
 842		if (unlikely(!page))
 843			continue;
 844
 845		if (radix_tree_exception(page)) {
 846			if (radix_tree_deref_retry(page)) {
 847				/*
 848				 * Transient condition which can only trigger
 849				 * when entry at index 0 moves out of or back
 850				 * to root: none yet gotten, safe to restart.
 851				 */
 852				WARN_ON(start | i);
 853				goto restart;
 854			}
 855			/*
 856			 * Otherwise, shmem/tmpfs must be storing a swap entry
 857			 * here as an exceptional entry: so skip over it -
 858			 * we only reach this from invalidate_mapping_pages().
 859			 */
 860			nr_skip++;
 861			continue;
 862		}
 863
 864		if (!page_cache_get_speculative(page))
 865			goto repeat;
 866
 867		/* Has the page moved? */
 868		if (unlikely(page != *((void **)pages[i]))) {
 869			page_cache_release(page);
 870			goto repeat;
 871		}
 872
 873		pages[ret] = page;
  874		ret++;
 875	}
 876
 877	/*
 878	 * If all entries were removed before we could secure them,
 879	 * try again, because callers stop trying once 0 is returned.
 880	 */
 881	if (unlikely(!ret && nr_found > nr_skip))
 882		goto restart;
 883	rcu_read_unlock();
 884	return ret;
 885}
 886
 887/**
 888 * find_get_pages_contig - gang contiguous pagecache lookup
 889 * @mapping:	The address_space to search
 890 * @index:	The starting page index
 891 * @nr_pages:	The maximum number of pages
 892 * @pages:	Where the resulting pages are placed
 893 *
 894 * find_get_pages_contig() works exactly like find_get_pages(), except
  895 * that the returned pages are guaranteed to be contiguous.
 896 *
 897 * find_get_pages_contig() returns the number of pages which were found.
 898 */
 899unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 900			       unsigned int nr_pages, struct page **pages)
 901{
 902	unsigned int i;
 903	unsigned int ret;
  904	unsigned int nr_found;
 905
 906	rcu_read_lock();
 907restart:
 908	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
 909				(void ***)pages, NULL, index, nr_pages);
 910	ret = 0;
 911	for (i = 0; i < nr_found; i++) {
 912		struct page *page;
 913repeat:
  914		page = radix_tree_deref_slot((void **)pages[i]);
 915		if (unlikely(!page))
 916			continue;
 917
 918		if (radix_tree_exception(page)) {
 919			if (radix_tree_deref_retry(page)) {
 920				/*
 921				 * Transient condition which can only trigger
 922				 * when entry at index 0 moves out of or back
 923				 * to root: none yet gotten, safe to restart.
 924				 */
 925				goto restart;
 926			}
 927			/*
 928			 * Otherwise, shmem/tmpfs must be storing a swap entry
 929			 * here as an exceptional entry: so stop looking for
 930			 * contiguous pages.
 931			 */
 932			break;
 933		}
 934
 935		if (!page_cache_get_speculative(page))
 936			goto repeat;
 937
 938		/* Has the page moved? */
 939		if (unlikely(page != *((void **)pages[i]))) {
 940			page_cache_release(page);
 941			goto repeat;
 942		}
 943
 944		/*
 945		 * must check mapping and index after taking the ref.
 946		 * otherwise we can get both false positives and false
 947		 * negatives, which is just confusing to the caller.
 948		 */
 949		if (page->mapping == NULL || page->index != index) {
 950			page_cache_release(page);
 951			break;
 952		}
 953
 954		pages[ret] = page;
 955		ret++;
 956		index++;
 957	}
 958	rcu_read_unlock();
 959	return ret;
 960}
 961EXPORT_SYMBOL(find_get_pages_contig);
 962
 963/**
 964 * find_get_pages_tag - find and return pages that match @tag
 965 * @mapping:	the address_space to search
 966 * @index:	the starting page index
 967 * @tag:	the tag index
 968 * @nr_pages:	the maximum number of pages
 969 * @pages:	where the resulting pages are placed
 970 *
 971 * Like find_get_pages, except we only return pages which are tagged with
 972 * @tag.   We update @index to index the next page for the traversal.
 973 */
 974unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 975			int tag, unsigned int nr_pages, struct page **pages)
 976{
 977	unsigned int i;
 978	unsigned int ret;
  979	unsigned int nr_found;
 980
 981	rcu_read_lock();
 982restart:
 983	nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
 984				(void ***)pages, *index, nr_pages, tag);
 985	ret = 0;
 986	for (i = 0; i < nr_found; i++) {
 987		struct page *page;
 988repeat:
 989		page = radix_tree_deref_slot((void **)pages[i]);
 990		if (unlikely(!page))
 991			continue;
 992
 993		if (radix_tree_exception(page)) {
 994			if (radix_tree_deref_retry(page)) {
 995				/*
 996				 * Transient condition which can only trigger
 997				 * when entry at index 0 moves out of or back
 998				 * to root: none yet gotten, safe to restart.
 999				 */
1000				goto restart;
1001			}
1002			/*
1003			 * This function is never used on a shmem/tmpfs
1004			 * mapping, so a swap entry won't be found here.
1005			 */
1006			BUG();
1007		}
1008
1009		if (!page_cache_get_speculative(page))
1010			goto repeat;
1011
1012		/* Has the page moved? */
1013		if (unlikely(page != *((void **)pages[i]))) {
1014			page_cache_release(page);
1015			goto repeat;
1016		}
1017
1018		pages[ret] = page;
 1019		ret++;
1020	}
1021
1022	/*
1023	 * If all entries were removed before we could secure them,
1024	 * try again, because callers stop trying once 0 is returned.
1025	 */
1026	if (unlikely(!ret && nr_found))
1027		goto restart;
1028	rcu_read_unlock();
1029
1030	if (ret)
1031		*index = pages[ret - 1]->index + 1;
1032
1033	return ret;
1034}
1035EXPORT_SYMBOL(find_get_pages_tag);
1036
1037/**
1038 * grab_cache_page_nowait - returns locked page at given index in given cache
1039 * @mapping: target address_space
1040 * @index: the page index
1041 *
1042 * Same as grab_cache_page(), but do not wait if the page is unavailable.
1043 * This is intended for speculative data generators, where the data can
1044 * be regenerated if the page couldn't be grabbed.  This routine should
1045 * be safe to call while holding the lock for another page.
1046 *
1047 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
1048 * and deadlock against the caller's locked page.
1049 */
1050struct page *
1051grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
1052{
1053	struct page *page = find_get_page(mapping, index);
1054
1055	if (page) {
1056		if (trylock_page(page))
1057			return page;
1058		page_cache_release(page);
1059		return NULL;
1060	}
1061	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
1062	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
1063		page_cache_release(page);
1064		page = NULL;
1065	}
1066	return page;
1067}
1068EXPORT_SYMBOL(grab_cache_page_nowait);
1069
1070/*
1071 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1072 * a _large_ part of the i/o request. Imagine the worst scenario:
1073 *
1074 *      ---R__________________________________________B__________
1075 *         ^ reading here                             ^ bad block(assume 4k)
1076 *
1077 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1078 * => failing the whole request => read(R) => read(R+1) =>
1079 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1080 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1081 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1082 *
1083 * It is going insane. Fix it by quickly scaling down the readahead size.
1084 */
1085static void shrink_readahead_size_eio(struct file *filp,
1086					struct file_ra_state *ra)
1087{
1088	ra->ra_pages /= 4;
1089}
1090
1091/**
1092 * do_generic_file_read - generic file read routine
1093 * @filp:	the file to read
1094 * @ppos:	current file position
1095 * @desc:	read_descriptor
1096 * @actor:	read method
1097 *
1098 * This is a generic file read routine, and uses the
1099 * mapping->a_ops->readpage() function for the actual low-level stuff.
1100 *
1101 * This is really ugly. But the goto's actually try to clarify some
1102 * of the logic when it comes to error handling etc.
1103 */
1104static void do_generic_file_read(struct file *filp, loff_t *ppos,
1105		read_descriptor_t *desc, read_actor_t actor)
1106{
1107	struct address_space *mapping = filp->f_mapping;
1108	struct inode *inode = mapping->host;
1109	struct file_ra_state *ra = &filp->f_ra;
1110	pgoff_t index;
1111	pgoff_t last_index;
1112	pgoff_t prev_index;
1113	unsigned long offset;      /* offset into pagecache page */
1114	unsigned int prev_offset;
1115	int error;
1116
1117	index = *ppos >> PAGE_CACHE_SHIFT;
1118	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
1119	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
1120	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
1121	offset = *ppos & ~PAGE_CACHE_MASK;
1122
1123	for (;;) {
1124		struct page *page;
1125		pgoff_t end_index;
1126		loff_t isize;
1127		unsigned long nr, ret;
1128
1129		cond_resched();
1130find_page:
1131		page = find_get_page(mapping, index);
1132		if (!page) {
1133			page_cache_sync_readahead(mapping,
1134					ra, filp,
1135					index, last_index - index);
1136			page = find_get_page(mapping, index);
1137			if (unlikely(page == NULL))
1138				goto no_cached_page;
1139		}
1140		if (PageReadahead(page)) {
1141			page_cache_async_readahead(mapping,
1142					ra, filp, page,
1143					index, last_index - index);
1144		}
1145		if (!PageUptodate(page)) {
1146			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
1147					!mapping->a_ops->is_partially_uptodate)
1148				goto page_not_up_to_date;
1149			if (!trylock_page(page))
1150				goto page_not_up_to_date;
1151			/* Did it get truncated before we got the lock? */
1152			if (!page->mapping)
1153				goto page_not_up_to_date_locked;
1154			if (!mapping->a_ops->is_partially_uptodate(page,
1155								desc, offset))
1156				goto page_not_up_to_date_locked;
1157			unlock_page(page);
1158		}
1159page_ok:
1160		/*
1161		 * i_size must be checked after we know the page is Uptodate.
1162		 *
1163		 * Checking i_size after the check allows us to calculate
1164		 * the correct value for "nr", which means the zero-filled
1165		 * part of the page is not copied back to userspace (unless
1166		 * another truncate extends the file - this is desired though).
1167		 */
1168
1169		isize = i_size_read(inode);
1170		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1171		if (unlikely(!isize || index > end_index)) {
1172			page_cache_release(page);
1173			goto out;
1174		}
1175
1176		/* nr is the maximum number of bytes to copy from this page */
1177		nr = PAGE_CACHE_SIZE;
1178		if (index == end_index) {
1179			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1180			if (nr <= offset) {
1181				page_cache_release(page);
1182				goto out;
1183			}
1184		}
1185		nr = nr - offset;
1186
1187		/* If users can be writing to this page using arbitrary
1188		 * virtual addresses, take care about potential aliasing
1189		 * before reading the page on the kernel side.
1190		 */
1191		if (mapping_writably_mapped(mapping))
1192			flush_dcache_page(page);
1193
1194		/*
1195		 * When a sequential read accesses a page several times,
1196		 * only mark it as accessed the first time.
1197		 */
1198		if (prev_index != index || offset != prev_offset)
1199			mark_page_accessed(page);
1200		prev_index = index;
1201
1202		/*
1203		 * Ok, we have the page, and it's up-to-date, so
1204		 * now we can copy it to user space...
1205		 *
1206		 * The actor routine returns how many bytes were actually used..
1207		 * NOTE! This may not be the same as how much of a user buffer
1208		 * we filled up (we may be padding etc), so we can only update
1209		 * "pos" here (the actor routine has to update the user buffer
1210		 * pointers and the remaining count).
1211		 */
1212		ret = actor(desc, page, offset, nr);
1213		offset += ret;
1214		index += offset >> PAGE_CACHE_SHIFT;
1215		offset &= ~PAGE_CACHE_MASK;
1216		prev_offset = offset;
1217
1218		page_cache_release(page);
1219		if (ret == nr && desc->count)
1220			continue;
1221		goto out;
1222
1223page_not_up_to_date:
1224		/* Get exclusive access to the page ... */
1225		error = lock_page_killable(page);
1226		if (unlikely(error))
1227			goto readpage_error;
1228
1229page_not_up_to_date_locked:
1230		/* Did it get truncated before we got the lock? */
1231		if (!page->mapping) {
1232			unlock_page(page);
1233			page_cache_release(page);
1234			continue;
1235		}
1236
1237		/* Did somebody else fill it already? */
1238		if (PageUptodate(page)) {
1239			unlock_page(page);
1240			goto page_ok;
1241		}
1242
1243readpage:
1244		/*
1245		 * A previous I/O error may have been due to temporary
1246		 * failures, eg. multipath errors.
1247		 * PG_error will be set again if readpage fails.
1248		 */
1249		ClearPageError(page);
1250		/* Start the actual read. The read will unlock the page. */
1251		error = mapping->a_ops->readpage(filp, page);
1252
1253		if (unlikely(error)) {
1254			if (error == AOP_TRUNCATED_PAGE) {
1255				page_cache_release(page);
1256				goto find_page;
1257			}
1258			goto readpage_error;
1259		}
1260
1261		if (!PageUptodate(page)) {
1262			error = lock_page_killable(page);
1263			if (unlikely(error))
1264				goto readpage_error;
1265			if (!PageUptodate(page)) {
1266				if (page->mapping == NULL) {
1267					/*
1268					 * invalidate_mapping_pages got it
1269					 */
1270					unlock_page(page);
1271					page_cache_release(page);
1272					goto find_page;
1273				}
1274				unlock_page(page);
1275				shrink_readahead_size_eio(filp, ra);
1276				error = -EIO;
1277				goto readpage_error;
1278			}
1279			unlock_page(page);
1280		}
1281
1282		goto page_ok;
1283
1284readpage_error:
1285		/* UHHUH! A synchronous read error occurred. Report it */
1286		desc->error = error;
1287		page_cache_release(page);
1288		goto out;
1289
1290no_cached_page:
1291		/*
1292		 * Ok, it wasn't cached, so we need to create a new
1293		 * page..
1294		 */
1295		page = page_cache_alloc_cold(mapping);
1296		if (!page) {
1297			desc->error = -ENOMEM;
1298			goto out;
1299		}
1300		error = add_to_page_cache_lru(page, mapping,
1301						index, GFP_KERNEL);
1302		if (error) {
1303			page_cache_release(page);
1304			if (error == -EEXIST)
1305				goto find_page;
1306			desc->error = error;
1307			goto out;
1308		}
1309		goto readpage;
1310	}
1311
1312out:
1313	ra->prev_pos = prev_index;
1314	ra->prev_pos <<= PAGE_CACHE_SHIFT;
1315	ra->prev_pos |= prev_offset;
1316
1317	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1318	file_accessed(filp);
1319}
1320
1321int file_read_actor(read_descriptor_t *desc, struct page *page,
1322			unsigned long offset, unsigned long size)
1323{
1324	char *kaddr;
1325	unsigned long left, count = desc->count;
1326
1327	if (size > count)
1328		size = count;
1329
1330	/*
1331	 * Faults on the destination of a read are common, so do it before
1332	 * taking the kmap.
1333	 */
1334	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1335		kaddr = kmap_atomic(page, KM_USER0);
1336		left = __copy_to_user_inatomic(desc->arg.buf,
1337						kaddr + offset, size);
1338		kunmap_atomic(kaddr, KM_USER0);
1339		if (left == 0)
1340			goto success;
1341	}
1342
1343	/* Do it the slow way */
1344	kaddr = kmap(page);
1345	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1346	kunmap(page);
1347
1348	if (left) {
1349		size -= left;
1350		desc->error = -EFAULT;
1351	}
1352success:
1353	desc->count = count - size;
1354	desc->written += size;
1355	desc->arg.buf += size;
1356	return size;
1357}
1358
1359/*
1360 * Performs necessary checks before doing a write
1361 * @iov:	io vector request
1362 * @nr_segs:	number of segments in the iovec
1363 * @count:	number of bytes to write
1364 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1365 *
1366 * Adjust number of segments and amount of bytes to write (nr_segs should be
1367 * properly initialized first). Returns appropriate error code that caller
 1368 * should return, or zero if the write should be allowed.
1369 */
1370int generic_segment_checks(const struct iovec *iov,
1371			unsigned long *nr_segs, size_t *count, int access_flags)
1372{
1373	unsigned long   seg;
1374	size_t cnt = 0;
1375	for (seg = 0; seg < *nr_segs; seg++) {
1376		const struct iovec *iv = &iov[seg];
1377
1378		/*
1379		 * If any segment has a negative length, or the cumulative
1380		 * length ever wraps negative then return -EINVAL.
1381		 */
1382		cnt += iv->iov_len;
1383		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1384			return -EINVAL;
1385		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1386			continue;
1387		if (seg == 0)
1388			return -EFAULT;
1389		*nr_segs = seg;
1390		cnt -= iv->iov_len;	/* This segment is no good */
1391		break;
1392	}
1393	*count = cnt;
1394	return 0;
1395}
1396EXPORT_SYMBOL(generic_segment_checks);
1397
1398/**
1399 * generic_file_aio_read - generic filesystem read routine
1400 * @iocb:	kernel I/O control block
1401 * @iov:	io vector request
1402 * @nr_segs:	number of segments in the iovec
1403 * @pos:	current file position
1404 *
1405 * This is the "read()" routine for all filesystems
1406 * that can use the page cache directly.
1407 */
1408ssize_t
1409generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1410		unsigned long nr_segs, loff_t pos)
1411{
1412	struct file *filp = iocb->ki_filp;
1413	ssize_t retval;
1414	unsigned long seg = 0;
1415	size_t count;
1416	loff_t *ppos = &iocb->ki_pos;
1417	struct blk_plug plug;
1418
1419	count = 0;
1420	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1421	if (retval)
1422		return retval;
1423
1424	blk_start_plug(&plug);
1425
1426	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1427	if (filp->f_flags & O_DIRECT) {
1428		loff_t size;
1429		struct address_space *mapping;
1430		struct inode *inode;
1431
1432		mapping = filp->f_mapping;
1433		inode = mapping->host;
1434		if (!count)
1435			goto out; /* skip atime */
1436		size = i_size_read(inode);
1437		if (pos < size) {
1438			retval = filemap_write_and_wait_range(mapping, pos,
1439					pos + iov_length(iov, nr_segs) - 1);
 1440			if (!retval) {
1441				retval = mapping->a_ops->direct_IO(READ, iocb,
 1442							iov, pos, nr_segs);
1443			}
1444			if (retval > 0) {
1445				*ppos = pos + retval;
1446				count -= retval;
1447			}
1448
1449			/*
1450			 * Btrfs can have a short DIO read if we encounter
1451			 * compressed extents, so if there was an error, or if
1452			 * we've already read everything we wanted to, or if
1453			 * there was a short read because we hit EOF, go ahead
1454			 * and return.  Otherwise fallthrough to buffered io for
1455			 * the rest of the read.
1456			 */
1457			if (retval < 0 || !count || *ppos >= size) {
1458				file_accessed(filp);
1459				goto out;
1460			}
1461		}
1462	}
1463
1464	count = retval;
1465	for (seg = 0; seg < nr_segs; seg++) {
1466		read_descriptor_t desc;
1467		loff_t offset = 0;
1468
1469		/*
1470		 * If we did a short DIO read we need to skip the section of the
1471		 * iov that we've already read data into.
1472		 */
1473		if (count) {
1474			if (count > iov[seg].iov_len) {
1475				count -= iov[seg].iov_len;
1476				continue;
1477			}
1478			offset = count;
1479			count = 0;
1480		}
1481
1482		desc.written = 0;
1483		desc.arg.buf = iov[seg].iov_base + offset;
1484		desc.count = iov[seg].iov_len - offset;
1485		if (desc.count == 0)
1486			continue;
1487		desc.error = 0;
1488		do_generic_file_read(filp, ppos, &desc, file_read_actor);
1489		retval += desc.written;
1490		if (desc.error) {
1491			retval = retval ?: desc.error;
1492			break;
1493		}
1494		if (desc.count > 0)
1495			break;
1496	}
1497out:
1498	blk_finish_plug(&plug);
1499	return retval;
1500}
1501EXPORT_SYMBOL(generic_file_aio_read);
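/*
 * Example (editor's sketch): page-cache based filesystems typically plug
 * generic_file_aio_read() into their file_operations and let do_sync_read()
 * provide the synchronous read() path on top of it.  A minimal table might
 * look like this; the structure name is hypothetical.
 */
#if 0	/* illustration only */
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_mmap,
};
#endif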
1502
1503static ssize_t
1504do_readahead(struct address_space *mapping, struct file *filp,
1505	     pgoff_t index, unsigned long nr)
1506{
1507	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1508		return -EINVAL;
1509
1510	force_page_cache_readahead(mapping, filp, index, nr);
1511	return 0;
1512}
1513
1514SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
1515{
1516	ssize_t ret;
1517	struct file *file;
1518
1519	ret = -EBADF;
1520	file = fget(fd);
1521	if (file) {
1522		if (file->f_mode & FMODE_READ) {
1523			struct address_space *mapping = file->f_mapping;
1524			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1525			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1526			unsigned long len = end - start + 1;
1527			ret = do_readahead(mapping, file, start, len);
1528		}
1529		fput(file);
1530	}
1531	return ret;
1532}
1533#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1534asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
1535{
1536	return SYSC_readahead((int) fd, offset, (size_t) count);
1537}
1538SYSCALL_ALIAS(sys_readahead, SyS_readahead);
1539#endif
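/*
 * Example (editor's sketch): from userspace the syscall defined above is
 * reached through the readahead(2) wrapper (needs _GNU_SOURCE and
 * <fcntl.h>), e.g.:
 *
 *	int fd = open("datafile", O_RDONLY);
 *	if (fd >= 0)
 *		readahead(fd, 0, 16 * 1024 * 1024);	(prefetch ~16MB)
 *
 * The call is advisory: it starts populating the page cache but gives no
 * guarantee that the data will still be resident when it is read.
 */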
1540
1541#ifdef CONFIG_MMU
1542/**
1543 * page_cache_read - adds requested page to the page cache if not already there
1544 * @file:	file to read
1545 * @offset:	page index
1546 *
1547 * This adds the requested page to the page cache if it isn't already there,
1548 * and schedules an I/O to read in its contents from disk.
1549 */
1550static int page_cache_read(struct file *file, pgoff_t offset)
1551{
1552	struct address_space *mapping = file->f_mapping;
1553	struct page *page; 
1554	int ret;
1555
1556	do {
1557		page = page_cache_alloc_cold(mapping);
1558		if (!page)
1559			return -ENOMEM;
1560
1561		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1562		if (ret == 0)
1563			ret = mapping->a_ops->readpage(file, page);
1564		else if (ret == -EEXIST)
1565			ret = 0; /* losing race to add is OK */
1566
1567		page_cache_release(page);
1568
1569	} while (ret == AOP_TRUNCATED_PAGE);
1570		
1571	return ret;
1572}
1573
1574#define MMAP_LOTSAMISS  (100)
1575
1576/*
1577 * Synchronous readahead happens when we don't even find
1578 * a page in the page cache at all.
1579 */
1580static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1581				   struct file_ra_state *ra,
1582				   struct file *file,
1583				   pgoff_t offset)
1584{
1585	unsigned long ra_pages;
1586	struct address_space *mapping = file->f_mapping;
1587
1588	/* If we don't want any read-ahead, don't bother */
1589	if (VM_RandomReadHint(vma))
1590		return;
1591	if (!ra->ra_pages)
1592		return;
1593
1594	if (VM_SequentialReadHint(vma)) {
1595		page_cache_sync_readahead(mapping, ra, file, offset,
1596					  ra->ra_pages);
1597		return;
1598	}
1599
1600	/* Avoid banging the cache line if not needed */
1601	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1602		ra->mmap_miss++;
1603
1604	/*
1605	 * Do we miss much more than hit in this file? If so,
1606	 * stop bothering with read-ahead. It will only hurt.
1607	 */
1608	if (ra->mmap_miss > MMAP_LOTSAMISS)
1609		return;
1610
1611	/*
1612	 * mmap read-around
1613	 */
1614	ra_pages = max_sane_readahead(ra->ra_pages);
1615	ra->start = max_t(long, 0, offset - ra_pages / 2);
1616	ra->size = ra_pages;
1617	ra->async_size = ra_pages / 4;
1618	ra_submit(ra, mapping, file);
1619}
1620
1621/*
1622 * Asynchronous readahead happens when we find the page and PG_readahead,
 1623 * so we want to possibly extend the readahead further.
1624 */
1625static void do_async_mmap_readahead(struct vm_area_struct *vma,
1626				    struct file_ra_state *ra,
1627				    struct file *file,
1628				    struct page *page,
1629				    pgoff_t offset)
1630{
1631	struct address_space *mapping = file->f_mapping;
1632
1633	/* If we don't want any read-ahead, don't bother */
1634	if (VM_RandomReadHint(vma))
1635		return;
1636	if (ra->mmap_miss > 0)
1637		ra->mmap_miss--;
1638	if (PageReadahead(page))
1639		page_cache_async_readahead(mapping, ra, file,
1640					   page, offset, ra->ra_pages);
1641}
1642
1643/**
1644 * filemap_fault - read in file data for page fault handling
1645 * @vma:	vma in which the fault was taken
1646 * @vmf:	struct vm_fault containing details of the fault
1647 *
1648 * filemap_fault() is invoked via the vma operations vector for a
1649 * mapped memory region to read in file data during a page fault.
1650 *
1651 * The goto's are kind of ugly, but this streamlines the normal case of having
1652 * it in the page cache, and handles the special cases reasonably without
1653 * having a lot of duplicated code.
1654 */
1655int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1656{
1657	int error;
1658	struct file *file = vma->vm_file;
1659	struct address_space *mapping = file->f_mapping;
1660	struct file_ra_state *ra = &file->f_ra;
1661	struct inode *inode = mapping->host;
1662	pgoff_t offset = vmf->pgoff;
1663	struct page *page;
1664	pgoff_t size;
1665	int ret = 0;
1666
1667	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1668	if (offset >= size)
1669		return VM_FAULT_SIGBUS;
1670
1671	/*
1672	 * Do we have something in the page cache already?
1673	 */
1674	page = find_get_page(mapping, offset);
1675	if (likely(page)) {
1676		/*
1677		 * We found the page, so try async readahead before
1678		 * waiting for the lock.
1679		 */
1680		do_async_mmap_readahead(vma, ra, file, page, offset);
1681	} else {
1682		/* No page in the page cache at all */
1683		do_sync_mmap_readahead(vma, ra, file, offset);
1684		count_vm_event(PGMAJFAULT);
1685		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1686		ret = VM_FAULT_MAJOR;
1687retry_find:
1688		page = find_get_page(mapping, offset);
1689		if (!page)
1690			goto no_cached_page;
1691	}
1692
1693	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1694		page_cache_release(page);
1695		return ret | VM_FAULT_RETRY;
1696	}
1697
1698	/* Did it get truncated? */
1699	if (unlikely(page->mapping != mapping)) {
1700		unlock_page(page);
1701		put_page(page);
1702		goto retry_find;
1703	}
1704	VM_BUG_ON(page->index != offset);
1705
1706	/*
1707	 * We have a locked page in the page cache, now we need to check
1708	 * that it's up-to-date. If not, it is going to be due to an error.
1709	 */
1710	if (unlikely(!PageUptodate(page)))
1711		goto page_not_uptodate;
1712
1713	/*
1714	 * Found the page and have a reference on it.
1715	 * We must recheck i_size under page lock.
1716	 */
1717	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1718	if (unlikely(offset >= size)) {
1719		unlock_page(page);
1720		page_cache_release(page);
1721		return VM_FAULT_SIGBUS;
1722	}
1723
1724	vmf->page = page;
1725	return ret | VM_FAULT_LOCKED;
1726
1727no_cached_page:
1728	/*
1729	 * We're only likely to ever get here if MADV_RANDOM is in
1730	 * effect.
1731	 */
1732	error = page_cache_read(file, offset);
1733
1734	/*
1735	 * The page we want has now been added to the page cache.
1736	 * In the unlikely event that someone removed it in the
1737	 * meantime, we'll just come back here and read it again.
1738	 */
1739	if (error >= 0)
1740		goto retry_find;
1741
1742	/*
1743	 * An error return from page_cache_read can result if the
1744	 * system is low on memory, or a problem occurs while trying
1745	 * to schedule I/O.
1746	 */
1747	if (error == -ENOMEM)
1748		return VM_FAULT_OOM;
1749	return VM_FAULT_SIGBUS;
1750
1751page_not_uptodate:
1752	/*
1753	 * Umm, take care of errors if the page isn't up-to-date.
1754	 * Try to re-read it _once_. We do this synchronously,
1755	 * because there really aren't any performance issues here
1756	 * and we need to check for errors.
1757	 */
1758	ClearPageError(page);
1759	error = mapping->a_ops->readpage(file, page);
1760	if (!error) {
1761		wait_on_page_locked(page);
1762		if (!PageUptodate(page))
1763			error = -EIO;
1764	}
1765	page_cache_release(page);
1766
1767	if (!error || error == AOP_TRUNCATED_PAGE)
1768		goto retry_find;
1769
 1770	/* Things didn't work out. Return VM_FAULT_SIGBUS to tell the mm layer so. */
1771	shrink_readahead_size_eio(file, ra);
1772	return VM_FAULT_SIGBUS;
1773}
1774EXPORT_SYMBOL(filemap_fault);
1775
1776const struct vm_operations_struct generic_file_vm_ops = {
1777	.fault		= filemap_fault,
1778};
1779
1780/* This is used for a general mmap of a disk file */
1781
1782int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1783{
1784	struct address_space *mapping = file->f_mapping;
1785
1786	if (!mapping->a_ops->readpage)
1787		return -ENOEXEC;
1788	file_accessed(file);
1789	vma->vm_ops = &generic_file_vm_ops;
1790	vma->vm_flags |= VM_CAN_NONLINEAR;
1791	return 0;
1792}
1793
1794/*
1795 * This is for filesystems which do not implement ->writepage.
1796 */
1797int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1798{
1799	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1800		return -EINVAL;
1801	return generic_file_mmap(file, vma);
1802}
1803#else
1804int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1805{
1806	return -ENOSYS;
1807}
1808int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1809{
1810	return -ENOSYS;
1811}
1812#endif /* CONFIG_MMU */
1813
1814EXPORT_SYMBOL(generic_file_mmap);
1815EXPORT_SYMBOL(generic_file_readonly_mmap);
1816
1817static struct page *__read_cache_page(struct address_space *mapping,
1818				pgoff_t index,
1819				int (*filler)(void *, struct page *),
1820				void *data,
1821				gfp_t gfp)
1822{
1823	struct page *page;
1824	int err;
1825repeat:
1826	page = find_get_page(mapping, index);
1827	if (!page) {
1828		page = __page_cache_alloc(gfp | __GFP_COLD);
1829		if (!page)
1830			return ERR_PTR(-ENOMEM);
1831		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1832		if (unlikely(err)) {
1833			page_cache_release(page);
1834			if (err == -EEXIST)
1835				goto repeat;
1836			/* Presumably ENOMEM for radix tree node */
1837			return ERR_PTR(err);
1838		}
1839		err = filler(data, page);
1840		if (err < 0) {
1841			page_cache_release(page);
1842			page = ERR_PTR(err);
1843		}
1844	}
1845	return page;
1846}
1847
1848static struct page *do_read_cache_page(struct address_space *mapping,
1849				pgoff_t index,
1850				int (*filler)(void *, struct page *),
1851				void *data,
1852				gfp_t gfp)
1853
1854{
1855	struct page *page;
1856	int err;
1857
1858retry:
1859	page = __read_cache_page(mapping, index, filler, data, gfp);
1860	if (IS_ERR(page))
1861		return page;
1862	if (PageUptodate(page))
1863		goto out;
1864
1865	lock_page(page);
1866	if (!page->mapping) {
1867		unlock_page(page);
1868		page_cache_release(page);
1869		goto retry;
1870	}
1871	if (PageUptodate(page)) {
1872		unlock_page(page);
1873		goto out;
1874	}
1875	err = filler(data, page);
1876	if (err < 0) {
1877		page_cache_release(page);
1878		return ERR_PTR(err);
1879	}
1880out:
1881	mark_page_accessed(page);
1882	return page;
1883}
1884
1885/**
1886 * read_cache_page_async - read into page cache, fill it if needed
1887 * @mapping:	the page's address_space
1888 * @index:	the page index
1889 * @filler:	function to perform the read
1890 * @data:	first arg to filler(data, page) function, often left as NULL
1891 *
1892 * Same as read_cache_page, but don't wait for page to become unlocked
1893 * after submitting it to the filler.
1894 *
1895 * Read into the page cache. If a page already exists, and PageUptodate() is
1896 * not set, try to fill the page but don't wait for it to become unlocked.
1897 *
1898 * If the page does not get brought uptodate, return -EIO.
1899 */
1900struct page *read_cache_page_async(struct address_space *mapping,
1901				pgoff_t index,
1902				int (*filler)(void *, struct page *),
1903				void *data)
1904{
1905	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1906}
1907EXPORT_SYMBOL(read_cache_page_async);
1908
1909static struct page *wait_on_page_read(struct page *page)
1910{
1911	if (!IS_ERR(page)) {
1912		wait_on_page_locked(page);
1913		if (!PageUptodate(page)) {
1914			page_cache_release(page);
1915			page = ERR_PTR(-EIO);
1916		}
1917	}
1918	return page;
1919}
1920
1921/**
1922 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
1923 * @mapping:	the page's address_space
1924 * @index:	the page index
1925 * @gfp:	the page allocator flags to use if allocating
1926 *
1927 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1928 * any new page allocations done using the specified allocation flags. Note
1929 * that the Radix tree operations will still use GFP_KERNEL, so you can't
1930 * expect to do this atomically or anything like that - but you can pass in
1931 * other page requirements.
1932 *
1933 * If the page does not get brought uptodate, return -EIO.
1934 */
1935struct page *read_cache_page_gfp(struct address_space *mapping,
1936				pgoff_t index,
1937				gfp_t gfp)
1938{
1939	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1940
1941	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1942}
1943EXPORT_SYMBOL(read_cache_page_gfp);
1944
1945/**
1946 * read_cache_page - read into page cache, fill it if needed
1947 * @mapping:	the page's address_space
1948 * @index:	the page index
1949 * @filler:	function to perform the read
1950 * @data:	first arg to filler(data, page) function, often left as NULL
1951 *
1952 * Read into the page cache. If a page already exists, and PageUptodate() is
1953 * not set, try to fill the page then wait for it to become unlocked.
1954 *
1955 * If the page does not get brought uptodate, return -EIO.
1956 */
1957struct page *read_cache_page(struct address_space *mapping,
1958				pgoff_t index,
1959				int (*filler)(void *, struct page *),
1960				void *data)
1961{
1962	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
1963}
1964EXPORT_SYMBOL(read_cache_page);
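/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of a caller
 * reading one page through the page cache, using the mapping's own ->readpage
 * as the filler exactly as read_cache_page_gfp() does above.  Assumes
 * <linux/pagemap.h> and <linux/err.h>; "myfs_read_one_page" is hypothetical.
 */
static int myfs_read_one_page(struct address_space *mapping, pgoff_t index)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	struct page *page;

	page = read_cache_page(mapping, index, filler, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* -EIO if it never became uptodate */

	/* ... use the uptodate, unlocked page here ... */

	page_cache_release(page);
	return 0;
}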
1965
1966/*
1967 * The logic we want is
1968 *
1969 *	if suid or (sgid and xgrp)
1970 *		remove privs
1971 */
1972int should_remove_suid(struct dentry *dentry)
1973{
1974	mode_t mode = dentry->d_inode->i_mode;
1975	int kill = 0;
1976
1977	/* suid always must be killed */
1978	if (unlikely(mode & S_ISUID))
1979		kill = ATTR_KILL_SUID;
1980
1981	/*
1982	 * sgid without any exec bits is just a mandatory locking mark; leave
1983	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1984	 */
1985	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1986		kill |= ATTR_KILL_SGID;
1987
1988	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1989		return kill;
1990
1991	return 0;
1992}
1993EXPORT_SYMBOL(should_remove_suid);
1994
1995static int __remove_suid(struct dentry *dentry, int kill)
1996{
1997	struct iattr newattrs;
1998
1999	newattrs.ia_valid = ATTR_FORCE | kill;
2000	return notify_change(dentry, &newattrs);
2001}
2002
2003int file_remove_suid(struct file *file)
2004{
2005	struct dentry *dentry = file->f_path.dentry;
2006	struct inode *inode = dentry->d_inode;
2007	int killsuid;
2008	int killpriv;
2009	int error = 0;
2010
2011	/* Fast path for nothing security related */
2012	if (IS_NOSEC(inode))
2013		return 0;
2014
2015	killsuid = should_remove_suid(dentry);
2016	killpriv = security_inode_need_killpriv(dentry);
2017
2018	if (killpriv < 0)
2019		return killpriv;
2020	if (killpriv)
2021		error = security_inode_killpriv(dentry);
2022	if (!error && killsuid)
2023		error = __remove_suid(dentry, killsuid);
2024	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
2025		inode->i_flags |= S_NOSEC;
2026
2027	return error;
2028}
2029EXPORT_SYMBOL(file_remove_suid);
2030
2031static size_t __iovec_copy_from_user_inatomic(char *vaddr,
2032			const struct iovec *iov, size_t base, size_t bytes)
2033{
2034	size_t copied = 0, left = 0;
2035
2036	while (bytes) {
2037		char __user *buf = iov->iov_base + base;
2038		int copy = min(bytes, iov->iov_len - base);
2039
2040		base = 0;
2041		left = __copy_from_user_inatomic(vaddr, buf, copy);
2042		copied += copy;
2043		bytes -= copy;
2044		vaddr += copy;
2045		iov++;
2046
2047		if (unlikely(left))
2048			break;
2049	}
2050	return copied - left;
2051}
2052
2053/*
2054 * Copy as much as we can into the page and return the number of bytes which
2055 * were successfully copied.  If a fault is encountered then return the number of
2056 * bytes which were successfully copied before the fault.
2057 */
2058size_t iov_iter_copy_from_user_atomic(struct page *page,
2059		struct iov_iter *i, unsigned long offset, size_t bytes)
2060{
2061	char *kaddr;
2062	size_t copied;
2063
2064	BUG_ON(!in_atomic());
2065	kaddr = kmap_atomic(page, KM_USER0);
2066	if (likely(i->nr_segs == 1)) {
2067		int left;
2068		char __user *buf = i->iov->iov_base + i->iov_offset;
2069		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
2070		copied = bytes - left;
2071	} else {
2072		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2073						i->iov, i->iov_offset, bytes);
2074	}
2075	kunmap_atomic(kaddr, KM_USER0);
2076
2077	return copied;
2078}
2079EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
2080
2081/*
2082 * This has the same side effects and return value as
2083 * iov_iter_copy_from_user_atomic().
2084 * The difference is that it attempts to resolve faults.
2085 * Page must not be locked.
2086 */
2087size_t iov_iter_copy_from_user(struct page *page,
2088		struct iov_iter *i, unsigned long offset, size_t bytes)
2089{
2090	char *kaddr;
2091	size_t copied;
2092
2093	kaddr = kmap(page);
2094	if (likely(i->nr_segs == 1)) {
2095		int left;
2096		char __user *buf = i->iov->iov_base + i->iov_offset;
2097		left = __copy_from_user(kaddr + offset, buf, bytes);
2098		copied = bytes - left;
2099	} else {
2100		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2101						i->iov, i->iov_offset, bytes);
2102	}
2103	kunmap(page);
2104	return copied;
2105}
2106EXPORT_SYMBOL(iov_iter_copy_from_user);
2107
2108void iov_iter_advance(struct iov_iter *i, size_t bytes)
2109{
2110	BUG_ON(i->count < bytes);
2111
2112	if (likely(i->nr_segs == 1)) {
2113		i->iov_offset += bytes;
2114		i->count -= bytes;
2115	} else {
2116		const struct iovec *iov = i->iov;
2117		size_t base = i->iov_offset;
2118
2119		/*
2120		 * The !iov->iov_len check ensures we skip over unlikely
2121		 * zero-length segments (without overrunning the iovec).
2122		 */
2123		while (bytes || unlikely(i->count && !iov->iov_len)) {
2124			int copy;
2125
2126			copy = min(bytes, iov->iov_len - base);
2127			BUG_ON(!i->count || i->count < copy);
2128			i->count -= copy;
2129			bytes -= copy;
2130			base += copy;
2131			if (iov->iov_len == base) {
2132				iov++;
2133				base = 0;
2134			}
2135		}
2136		i->iov = iov;
2137		i->iov_offset = base;
2138	}
2139}
2140EXPORT_SYMBOL(iov_iter_advance);
2141
2142/*
2143 * Fault in the first iovec of the given iov_iter, to a maximum length
2144 * of bytes. Returns 0 on success, or non-zero if the memory could not be
2145 * accessed (i.e. because it is an invalid address).
2146 *
2147 * writev-intensive code may want this to prefault several iovecs -- that
2148 * would be possible (callers must not rely on the fact that _only_ the
2149 * first iovec will be faulted with the current implementation).
2150 */
2151int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2152{
2153	char __user *buf = i->iov->iov_base + i->iov_offset;
2154	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2155	return fault_in_pages_readable(buf, bytes);
2156}
2157EXPORT_SYMBOL(iov_iter_fault_in_readable);
2158
2159/*
2160 * Return the count of just the current iov_iter segment.
2161 */
2162size_t iov_iter_single_seg_count(struct iov_iter *i)
2163{
2164	const struct iovec *iov = i->iov;
2165	if (i->nr_segs == 1)
2166		return i->count;
2167	else
2168		return min(i->count, iov->iov_len - i->iov_offset);
2169}
2170EXPORT_SYMBOL(iov_iter_single_seg_count);
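/*
 * Editor's illustration (not part of filemap.c): a hedged sketch that drives
 * the iov_iter helpers above by hand for a single user buffer, mirroring the
 * prefault-then-atomic-copy pattern generic_perform_write() uses further down.
 * "copy_user_buf_to_page", "ubuf" and "len" are hypothetical; offset + len is
 * assumed not to cross the end of the page.
 */
static size_t copy_user_buf_to_page(struct page *page, unsigned long offset,
				    char __user *ubuf, size_t len)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter i;
	size_t copied;

	iov_iter_init(&i, &iov, 1, len, 0);

	if (iov_iter_fault_in_readable(&i, len))
		return 0;			/* invalid user address */

	pagefault_disable();
	copied = iov_iter_copy_from_user_atomic(page, &i, offset, len);
	pagefault_enable();

	iov_iter_advance(&i, copied);
	return copied;
}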
2171
2172/*
2173 * Performs necessary checks before doing a write
2174 *
2175 * Can adjust the writing position or the number of bytes to write.
2176 * Returns an appropriate error code that the caller should return, or
2177 * zero if the write should be allowed.
2178 */
2179inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2180{
2181	struct inode *inode = file->f_mapping->host;
2182	unsigned long limit = rlimit(RLIMIT_FSIZE);
2183
2184        if (unlikely(*pos < 0))
2185                return -EINVAL;
2186
2187	if (!isblk) {
2188		/* FIXME: this is for backwards compatibility with 2.4 */
2189		if (file->f_flags & O_APPEND)
2190                        *pos = i_size_read(inode);
2191
2192		if (limit != RLIM_INFINITY) {
2193			if (*pos >= limit) {
2194				send_sig(SIGXFSZ, current, 0);
2195				return -EFBIG;
2196			}
2197			if (*count > limit - (typeof(limit))*pos) {
2198				*count = limit - (typeof(limit))*pos;
2199			}
2200		}
2201	}
2202
2203	/*
2204	 * LFS rule
2205	 */
2206	if (unlikely(*pos + *count > MAX_NON_LFS &&
2207				!(file->f_flags & O_LARGEFILE))) {
2208		if (*pos >= MAX_NON_LFS) {
2209			return -EFBIG;
2210		}
2211		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2212			*count = MAX_NON_LFS - (unsigned long)*pos;
2213		}
2214	}
2215
2216	/*
2217	 * Are we about to exceed the fs block limit ?
2218	 *
2219	 * If we have written data it becomes a short write.  If we have
2220	 * exceeded without writing data we send a signal and return EFBIG.
2221 * Linus' frestrict idea will clean these up nicely.
2222	 */
2223	if (likely(!isblk)) {
2224		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2225			if (*count || *pos > inode->i_sb->s_maxbytes) {
2226				return -EFBIG;
2227			}
2228			/* zero-length writes at ->s_maxbytes are OK */
2229		}
2230
2231		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2232			*count = inode->i_sb->s_maxbytes - *pos;
2233	} else {
2234#ifdef CONFIG_BLOCK
2235		loff_t isize;
2236		if (bdev_read_only(I_BDEV(inode)))
2237			return -EPERM;
2238		isize = i_size_read(inode);
2239		if (*pos >= isize) {
2240			if (*count || *pos > isize)
2241				return -ENOSPC;
2242		}
2243
2244		if (*pos + *count > isize)
2245			*count = isize - *pos;
2246#else
2247		return -EPERM;
2248#endif
2249	}
2250	return 0;
2251}
2252EXPORT_SYMBOL(generic_write_checks);
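/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of the start
 * of a write path doing its own checks; the real callers in this file are
 * __generic_file_aio_write() and the block device code.  "myfs_write_prep"
 * is hypothetical.
 */
static ssize_t myfs_write_prep(struct file *file, loff_t *pos, size_t *count)
{
	int err;

	/* May move *pos (O_APPEND) and shrink *count (rlimit, LFS, s_maxbytes) */
	err = generic_write_checks(file, pos, count, 0 /* not a block device */);
	if (err)
		return err;
	if (*count == 0)
		return 0;		/* nothing left to write */

	/* proceed with the (possibly shortened) write of *count bytes at *pos */
	return *count;
}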
2253
2254int pagecache_write_begin(struct file *file, struct address_space *mapping,
2255				loff_t pos, unsigned len, unsigned flags,
2256				struct page **pagep, void **fsdata)
2257{
2258	const struct address_space_operations *aops = mapping->a_ops;
2259
2260	return aops->write_begin(file, mapping, pos, len, flags,
2261							pagep, fsdata);
2262}
2263EXPORT_SYMBOL(pagecache_write_begin);
2264
2265int pagecache_write_end(struct file *file, struct address_space *mapping,
2266				loff_t pos, unsigned len, unsigned copied,
2267				struct page *page, void *fsdata)
2268{
2269	const struct address_space_operations *aops = mapping->a_ops;
2270
2271	mark_page_accessed(page);
2272	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2273}
2274EXPORT_SYMBOL(pagecache_write_end);
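/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of an
 * in-kernel writer pushing one chunk into the page cache through the
 * write_begin/write_end pair wrapped above, in the spirit of the loop
 * driver.  "write_one_chunk" is hypothetical and len is assumed not to
 * cross a page boundary.
 */
static int write_one_chunk(struct file *file, loff_t pos,
			   const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;
	void *fsdata;
	char *kaddr;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len,
				    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret)
		return ret;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + offset, buf, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return ret < 0 ? ret : 0;
}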
2275
2276ssize_t
2277generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2278		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2279		size_t count, size_t ocount)
2280{
2281	struct file	*file = iocb->ki_filp;
2282	struct address_space *mapping = file->f_mapping;
2283	struct inode	*inode = mapping->host;
2284	ssize_t		written;
2285	size_t		write_len;
2286	pgoff_t		end;
2287
2288	if (count != ocount)
2289		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2290
2291	write_len = iov_length(iov, *nr_segs);
2292	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2293
2294	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2295	if (written)
2296		goto out;
2297
2298	/*
2299	 * After a write we want buffered reads to be sure to go to disk to get
2300	 * the new data.  We invalidate clean cached pages from the region we're
2301	 * about to write.  We do this *before* the write so that we can return
2302	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2303	 */
2304	if (mapping->nrpages) {
2305		written = invalidate_inode_pages2_range(mapping,
2306					pos >> PAGE_CACHE_SHIFT, end);
2307		/*
2308		 * If a page cannot be invalidated, return 0 to fall back
2309		 * to buffered write.
2310		 */
2311		if (written) {
2312			if (written == -EBUSY)
2313				return 0;
2314			goto out;
2315		}
2316	}
2317
2318	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2319
2320	/*
2321	 * Finally, try again to invalidate clean pages which might have been
2322	 * cached by non-direct readahead, or faulted in by get_user_pages()
2323	 * if the source of the write was an mmap'ed region of the file
2324	 * we're writing.  Either one is a pretty crazy thing to do,
2325	 * so we don't support it 100%.  If this invalidation
2326	 * fails, tough, the write still worked...
2327	 */
2328	if (mapping->nrpages) {
2329		invalidate_inode_pages2_range(mapping,
2330					      pos >> PAGE_CACHE_SHIFT, end);
2331	}
2332
2333	if (written > 0) {
2334		pos += written;
2335		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2336			i_size_write(inode, pos);
2337			mark_inode_dirty(inode);
2338		}
2339		*ppos = pos;
2340	}
2341out:
2342	return written;
2343}
2344EXPORT_SYMBOL(generic_file_direct_write);
2345
2346/*
2347 * Find or create a page at the given pagecache position. Return the locked
2348 * page. This function is specifically for buffered writes.
2349 */
2350struct page *grab_cache_page_write_begin(struct address_space *mapping,
2351					pgoff_t index, unsigned flags)
2352{
2353	int status;
2354	struct page *page;
2355	gfp_t gfp_notmask = 0;
2356	if (flags & AOP_FLAG_NOFS)
2357		gfp_notmask = __GFP_FS;
2358repeat:
2359	page = find_lock_page(mapping, index);
2360	if (page)
2361		goto found;
2362
2363	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
2364	if (!page)
2365		return NULL;
2366	status = add_to_page_cache_lru(page, mapping, index,
2367						GFP_KERNEL & ~gfp_notmask);
2368	if (unlikely(status)) {
2369		page_cache_release(page);
2370		if (status == -EEXIST)
2371			goto repeat;
2372		return NULL;
2373	}
2374found:
2375	wait_on_page_writeback(page);
2376	return page;
2377}
2378EXPORT_SYMBOL(grab_cache_page_write_begin);
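/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of a
 * trivial ->write_begin built on the helper above, in the spirit of
 * simple_write_begin() in fs/libfs.c.  "myfs_write_begin" is hypothetical;
 * a real implementation would also read in or zero the parts of a
 * not-uptodate page that this write will not cover.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;		/* returned locked, with a reference held */
	return 0;
}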
2379
2380static ssize_t generic_perform_write(struct file *file,
2381				struct iov_iter *i, loff_t pos)
2382{
2383	struct address_space *mapping = file->f_mapping;
2384	const struct address_space_operations *a_ops = mapping->a_ops;
2385	long status = 0;
2386	ssize_t written = 0;
2387	unsigned int flags = 0;
2388
2389	/*
2390	 * Copies from kernel address space cannot fail (NFSD is a big user).
2391	 */
2392	if (segment_eq(get_fs(), KERNEL_DS))
2393		flags |= AOP_FLAG_UNINTERRUPTIBLE;
2394
2395	do {
2396		struct page *page;
2397		unsigned long offset;	/* Offset into pagecache page */
2398		unsigned long bytes;	/* Bytes to write to page */
2399		size_t copied;		/* Bytes copied from user */
2400		void *fsdata;
2401
2402		offset = (pos & (PAGE_CACHE_SIZE - 1));
2403		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2404						iov_iter_count(i));
2405
2406again:
2407
2408		/*
2409		 * Bring in the user page that we will copy from _first_.
2410		 * Otherwise there's a nasty deadlock on copying from the
2411		 * same page as we're writing to, without it being marked
2412		 * up-to-date.
2413		 *
2414		 * Not only is this an optimisation, but it is also required
2415		 * to check that the address is actually valid, when atomic
2416		 * usercopies are used, below.
2417		 */
2418		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2419			status = -EFAULT;
2420			break;
2421		}
2422
2423		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2424						&page, &fsdata);
2425		if (unlikely(status))
2426			break;
2427
2428		if (mapping_writably_mapped(mapping))
2429			flush_dcache_page(page);
2430
2431		pagefault_disable();
2432		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2433		pagefault_enable();
2434		flush_dcache_page(page);
2435
2436		mark_page_accessed(page);
2437		status = a_ops->write_end(file, mapping, pos, bytes, copied,
2438						page, fsdata);
2439		if (unlikely(status < 0))
2440			break;
2441		copied = status;
2442
2443		cond_resched();
2444
2445		iov_iter_advance(i, copied);
2446		if (unlikely(copied == 0)) {
2447			/*
2448			 * If we were unable to copy any data at all, we must
2449			 * fall back to a single segment length write.
2450			 *
2451			 * If we didn't fall back here, we could livelock
2452			 * because not all segments in the iov can be copied at
2453			 * once without a pagefault.
2454			 */
2455			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2456						iov_iter_single_seg_count(i));
2457			goto again;
2458		}
2459		pos += copied;
2460		written += copied;
2461
2462		balance_dirty_pages_ratelimited(mapping);
2463
2464	} while (iov_iter_count(i));
2465
2466	return written ? written : status;
2467}
2468
2469ssize_t
2470generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2471		unsigned long nr_segs, loff_t pos, loff_t *ppos,
2472		size_t count, ssize_t written)
2473{
2474	struct file *file = iocb->ki_filp;
2475	ssize_t status;
2476	struct iov_iter i;
2477
2478	iov_iter_init(&i, iov, nr_segs, count, written);
2479	status = generic_perform_write(file, &i, pos);
2480
2481	if (likely(status >= 0)) {
2482		written += status;
2483		*ppos = pos + status;
2484	}
2485
2486	return written ? written : status;
2487}
2488EXPORT_SYMBOL(generic_file_buffered_write);
2489
2490/**
2491 * __generic_file_aio_write - write data to a file
2492 * @iocb:	IO state structure (file, offset, etc.)
2493 * @iov:	vector with data to write
2494 * @nr_segs:	number of segments in the vector
2495 * @ppos:	position where to write
2496 *
2497 * This function does all the work needed for actually writing data to a
2498 * file. It does all basic checks, removes SUID from the file, updates
2499 * modification times and calls proper subroutines depending on whether we
2500 * do direct IO or a standard buffered write.
2501 *
2502 * It expects i_mutex to be grabbed unless we work on a block device or similar
2503 * object which does not need locking at all.
2504 *
2505 * This function does *not* take care of syncing data in case of O_SYNC write.
2506 * A caller has to handle it. This is mainly due to the fact that we want to
2507 * avoid syncing under i_mutex.
2508 */
2509ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2510				 unsigned long nr_segs, loff_t *ppos)
2511{
2512	struct file *file = iocb->ki_filp;
2513	struct address_space * mapping = file->f_mapping;
2514	size_t ocount;		/* original count */
2515	size_t count;		/* after file limit checks */
2516	struct inode 	*inode = mapping->host;
2517	loff_t		pos;
2518	ssize_t		written;
2519	ssize_t		err;
2520
2521	ocount = 0;
2522	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2523	if (err)
2524		return err;
2525
2526	count = ocount;
2527	pos = *ppos;
2528
2529	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2530
2531	/* We can write back this queue in page reclaim */
2532	current->backing_dev_info = mapping->backing_dev_info;
2533	written = 0;
2534
2535	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2536	if (err)
2537		goto out;
2538
2539	if (count == 0)
2540		goto out;
2541
2542	err = file_remove_suid(file);
2543	if (err)
2544		goto out;
2545
2546	file_update_time(file);
2547
2548	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2549	if (unlikely(file->f_flags & O_DIRECT)) {
2550		loff_t endbyte;
2551		ssize_t written_buffered;
2552
2553		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2554							ppos, count, ocount);
2555		if (written < 0 || written == count)
2556			goto out;
2557		/*
2558		 * direct-io write to a hole: fall through to buffered I/O
2559		 * for completing the rest of the request.
2560		 */
2561		pos += written;
2562		count -= written;
2563		written_buffered = generic_file_buffered_write(iocb, iov,
2564						nr_segs, pos, ppos, count,
2565						written);
2566		/*
2567		 * If generic_file_buffered_write() returned a synchronous error
2568		 * then we want to return the number of bytes which were
2569		 * direct-written, or the error code if that was zero.  Note
2570		 * that this differs from normal direct-io semantics, which
2571		 * will return -EFOO even if some bytes were written.
2572		 */
2573		if (written_buffered < 0) {
2574			err = written_buffered;
2575			goto out;
2576		}
2577
2578		/*
2579		 * We need to ensure that the page cache pages are written to
2580		 * disk and invalidated to preserve the expected O_DIRECT
2581		 * semantics.
2582		 */
2583		endbyte = pos + written_buffered - written - 1;
2584		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2585		if (err == 0) {
2586			written = written_buffered;
2587			invalidate_mapping_pages(mapping,
2588						 pos >> PAGE_CACHE_SHIFT,
2589						 endbyte >> PAGE_CACHE_SHIFT);
2590		} else {
2591			/*
2592			 * We don't know how much we wrote, so just return
2593			 * the number of bytes which were direct-written
2594			 */
2595		}
2596	} else {
2597		written = generic_file_buffered_write(iocb, iov, nr_segs,
2598				pos, ppos, count, written);
2599	}
2600out:
2601	current->backing_dev_info = NULL;
2602	return written ? written : err;
2603}
2604EXPORT_SYMBOL(__generic_file_aio_write);
2605
2606/**
2607 * generic_file_aio_write - write data to a file
2608 * @iocb:	IO state structure
2609 * @iov:	vector with data to write
2610 * @nr_segs:	number of segments in the vector
2611 * @pos:	position in file where to write
2612 *
2613 * This is a wrapper around __generic_file_aio_write() to be used by most
2614 * filesystems. It takes care of syncing the file in the O_SYNC case
2615 * and acquires i_mutex as needed.
2616 */
2617ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2618		unsigned long nr_segs, loff_t pos)
2619{
2620	struct file *file = iocb->ki_filp;
2621	struct inode *inode = file->f_mapping->host;
2622	struct blk_plug plug;
2623	ssize_t ret;
2624
2625	BUG_ON(iocb->ki_pos != pos);
2626
2627	mutex_lock(&inode->i_mutex);
2628	blk_start_plug(&plug);
2629	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2630	mutex_unlock(&inode->i_mutex);
2631
2632	if (ret > 0 || ret == -EIOCBQUEUED) {
2633		ssize_t err;
2634
2635		err = generic_write_sync(file, pos, ret);
2636		if (err < 0 && ret > 0)
2637			ret = err;
2638	}
2639	blk_finish_plug(&plug);
2640	return ret;
2641}
2642EXPORT_SYMBOL(generic_file_aio_write);
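/*
 * Editor's illustration (not part of filemap.c): the usual way a filesystem
 * consumes the helper above is simply to point its file_operations at it;
 * i_mutex locking and O_SYNC handling are then done here.  A hedged sketch,
 * "myfs_file_ops" is hypothetical.
 */
static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};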
2643
2644/**
2645 * try_to_release_page() - release old fs-specific metadata on a page
2646 *
2647 * @page: the page which the kernel is trying to free
2648 * @gfp_mask: memory allocation flags (and I/O mode)
2649 *
2650 * The address_space is to try to release any data against the page
2651 * (presumably at page->private).  If the release was successful, return `1'.
2652 * Otherwise return zero.
2653 *
2654 * This may also be called if PG_fscache is set on a page, indicating that the
2655 * page is known to the local caching routines.
2656 *
2657 * The @gfp_mask argument specifies whether I/O may be performed to release
2658 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT and __GFP_FS).
2659 *
2660 */
2661int try_to_release_page(struct page *page, gfp_t gfp_mask)
2662{
2663	struct address_space * const mapping = page->mapping;
2664
2665	BUG_ON(!PageLocked(page));
2666	if (PageWriteback(page))
2667		return 0;
2668
2669	if (mapping && mapping->a_ops->releasepage)
2670		return mapping->a_ops->releasepage(page, gfp_mask);
2671	return try_to_free_buffers(page);
2672}
2673
2674EXPORT_SYMBOL(try_to_release_page);
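/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of the
 * caller-side pattern.  The page must already be locked; GFP_KERNEL says
 * the release may block and do I/O.  "strip_private_state" is hypothetical.
 */
static int strip_private_state(struct page *page)	/* page is locked */
{
	if (!page_has_private(page))
		return 1;			/* nothing to release */
	return try_to_release_page(page, GFP_KERNEL);
}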
v3.5.6
   1/*
   2 *	linux/mm/filemap.c
   3 *
   4 * Copyright (C) 1994-1999  Linus Torvalds
   5 */
   6
   7/*
   8 * This file handles the generic file mmap semantics used by
   9 * most "normal" filesystems (but you don't /have/ to use this:
  10 * the NFS filesystem used to do this differently, for example)
  11 */
  12#include <linux/export.h>
  13#include <linux/compiler.h>
  14#include <linux/fs.h>
  15#include <linux/uaccess.h>
  16#include <linux/aio.h>
  17#include <linux/capability.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/gfp.h>
  20#include <linux/mm.h>
  21#include <linux/swap.h>
  22#include <linux/mman.h>
  23#include <linux/pagemap.h>
  24#include <linux/file.h>
  25#include <linux/uio.h>
  26#include <linux/hash.h>
  27#include <linux/writeback.h>
  28#include <linux/backing-dev.h>
  29#include <linux/pagevec.h>
  30#include <linux/blkdev.h>
  31#include <linux/security.h>
  32#include <linux/cpuset.h>
  33#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
  34#include <linux/memcontrol.h>
  35#include <linux/cleancache.h>
  36#include "internal.h"
  37
  38/*
  39 * FIXME: remove all knowledge of the buffer layer from the core VM
  40 */
  41#include <linux/buffer_head.h> /* for try_to_free_buffers */
  42
  43#include <asm/mman.h>
  44
  45/*
  46 * Shared mappings implemented 30.11.1994. It's not fully working yet,
  47 * though.
  48 *
  49 * Shared mappings now work. 15.8.1995  Bruno.
  50 *
  51 * finished 'unifying' the page and buffer cache and SMP-threaded the
  52 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  53 *
  54 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  55 */
  56
  57/*
  58 * Lock ordering:
  59 *
  60 *  ->i_mmap_mutex		(truncate_pagecache)
  61 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  62 *      ->swap_lock		(exclusive_swap_page, others)
  63 *        ->mapping->tree_lock
  64 *
  65 *  ->i_mutex
  66 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
  67 *
  68 *  ->mmap_sem
  69 *    ->i_mmap_mutex
  70 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  71 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  72 *
  73 *  ->mmap_sem
  74 *    ->lock_page		(access_process_vm)
  75 *
  76 *  ->i_mutex			(generic_file_buffered_write)
  77 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
  78 *
  79 *  bdi->wb.list_lock
  80 *    sb_lock			(fs/fs-writeback.c)
  81 *    ->mapping->tree_lock	(__sync_single_inode)
  82 *
  83 *  ->i_mmap_mutex
  84 *    ->anon_vma.lock		(vma_adjust)
  85 *
  86 *  ->anon_vma.lock
  87 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  88 *
  89 *  ->page_table_lock or pte_lock
  90 *    ->swap_lock		(try_to_unmap_one)
  91 *    ->private_lock		(try_to_unmap_one)
  92 *    ->tree_lock		(try_to_unmap_one)
  93 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
  94 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
  95 *    ->private_lock		(page_remove_rmap->set_page_dirty)
  96 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
  97 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
  98 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
  99 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 100 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 101 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 102 *
 103 * ->i_mmap_mutex
 104 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 105 */
 106
 107/*
 108 * Delete a page from the page cache and free it. Caller has to make
 109 * sure the page is locked and that nobody else uses it - or that usage
 110 * is safe.  The caller must hold the mapping's tree_lock.
 111 */
 112void __delete_from_page_cache(struct page *page)
 113{
 114	struct address_space *mapping = page->mapping;
 115
 116	/*
 117	 * if we're uptodate, flush out into the cleancache, otherwise
 118	 * invalidate any existing cleancache entries.  We can't leave
 119	 * stale data around in the cleancache once our page is gone
 120	 */
 121	if (PageUptodate(page) && PageMappedToDisk(page))
 122		cleancache_put_page(page);
 123	else
 124		cleancache_invalidate_page(mapping, page);
 125
 126	radix_tree_delete(&mapping->page_tree, page->index);
 127	page->mapping = NULL;
 128	/* Leave page->index set: truncation lookup relies upon it */
 129	mapping->nrpages--;
 130	__dec_zone_page_state(page, NR_FILE_PAGES);
 131	if (PageSwapBacked(page))
 132		__dec_zone_page_state(page, NR_SHMEM);
 133	BUG_ON(page_mapped(page));
 134
 135	/*
 136	 * Some filesystems seem to re-dirty the page even after
 137	 * the VM has canceled the dirty bit (eg ext3 journaling).
 138	 *
 139	 * Fix it up by doing a final dirty accounting check after
 140	 * having removed the page entirely.
 141	 */
 142	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
 143		dec_zone_page_state(page, NR_FILE_DIRTY);
 144		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 145	}
 146}
 147
 148/**
 149 * delete_from_page_cache - delete page from page cache
 150 * @page: the page which the kernel is trying to remove from page cache
 151 *
 152 * This must be called only on pages that have been verified to be in the page
 153 * cache and locked.  It will never put the page into the free list, the caller
 154 * has a reference on the page.
 155 */
 156void delete_from_page_cache(struct page *page)
 157{
 158	struct address_space *mapping = page->mapping;
 159	void (*freepage)(struct page *);
 160
 161	BUG_ON(!PageLocked(page));
 162
 163	freepage = mapping->a_ops->freepage;
 164	spin_lock_irq(&mapping->tree_lock);
 165	__delete_from_page_cache(page);
 166	spin_unlock_irq(&mapping->tree_lock);
 167	mem_cgroup_uncharge_cache_page(page);
 168
 169	if (freepage)
 170		freepage(page);
 171	page_cache_release(page);
 172}
 173EXPORT_SYMBOL(delete_from_page_cache);
 174
 175static int sleep_on_page(void *word)
 176{
 177	io_schedule();
 178	return 0;
 179}
 180
 181static int sleep_on_page_killable(void *word)
 182{
 183	sleep_on_page(word);
 184	return fatal_signal_pending(current) ? -EINTR : 0;
 185}
 186
 187/**
 188 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 189 * @mapping:	address space structure to write
 190 * @start:	offset in bytes where the range starts
 191 * @end:	offset in bytes where the range ends (inclusive)
 192 * @sync_mode:	enable synchronous operation
 193 *
 194 * Start writeback against all of a mapping's dirty pages that lie
 195 * within the byte offsets <start, end> inclusive.
 196 *
 197 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 198 * opposed to a regular memory cleansing writeback.  The difference between
 199 * these two operations is that if a dirty page/buffer is encountered, it must
 200 * be waited upon, and not just skipped over.
 201 */
 202int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 203				loff_t end, int sync_mode)
 204{
 205	int ret;
 206	struct writeback_control wbc = {
 207		.sync_mode = sync_mode,
 208		.nr_to_write = LONG_MAX,
 209		.range_start = start,
 210		.range_end = end,
 211	};
 212
 213	if (!mapping_cap_writeback_dirty(mapping))
 214		return 0;
 215
 216	ret = do_writepages(mapping, &wbc);
 217	return ret;
 218}
 219
 220static inline int __filemap_fdatawrite(struct address_space *mapping,
 221	int sync_mode)
 222{
 223	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 224}
 225
 226int filemap_fdatawrite(struct address_space *mapping)
 227{
 228	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
 229}
 230EXPORT_SYMBOL(filemap_fdatawrite);
 231
 232int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 233				loff_t end)
 234{
 235	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 236}
 237EXPORT_SYMBOL(filemap_fdatawrite_range);
 238
 239/**
 240 * filemap_flush - mostly a non-blocking flush
 241 * @mapping:	target address_space
 242 *
 243 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 244 * purposes - I/O may not be started against all dirty pages.
 245 */
 246int filemap_flush(struct address_space *mapping)
 247{
 248	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 249}
 250EXPORT_SYMBOL(filemap_flush);
 251
 252/**
 253 * filemap_fdatawait_range - wait for writeback to complete
 254 * @mapping:		address space structure to wait for
 255 * @start_byte:		offset in bytes where the range starts
 256 * @end_byte:		offset in bytes where the range ends (inclusive)
 257 *
 258 * Walk the list of under-writeback pages of the given address space
 259 * in the given range and wait for all of them.
 260 */
 261int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 262			    loff_t end_byte)
 263{
 264	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
 265	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
 266	struct pagevec pvec;
 267	int nr_pages;
 268	int ret = 0;
 269
 270	if (end_byte < start_byte)
 271		return 0;
 272
 273	pagevec_init(&pvec, 0);
 274	while ((index <= end) &&
 275			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 276			PAGECACHE_TAG_WRITEBACK,
 277			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
 278		unsigned i;
 279
 280		for (i = 0; i < nr_pages; i++) {
 281			struct page *page = pvec.pages[i];
 282
 283			/* until radix tree lookup accepts end_index */
 284			if (page->index > end)
 285				continue;
 286
 287			wait_on_page_writeback(page);
 288			if (TestClearPageError(page))
 289				ret = -EIO;
 290		}
 291		pagevec_release(&pvec);
 292		cond_resched();
 293	}
 294
 295	/* Check for outstanding write errors */
 296	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 297		ret = -ENOSPC;
 298	if (test_and_clear_bit(AS_EIO, &mapping->flags))
 299		ret = -EIO;
 300
 301	return ret;
 302}
 303EXPORT_SYMBOL(filemap_fdatawait_range);
 304
 305/**
 306 * filemap_fdatawait - wait for all under-writeback pages to complete
 307 * @mapping: address space structure to wait for
 308 *
 309 * Walk the list of under-writeback pages of the given address space
 310 * and wait for all of them.
 311 */
 312int filemap_fdatawait(struct address_space *mapping)
 313{
 314	loff_t i_size = i_size_read(mapping->host);
 315
 316	if (i_size == 0)
 317		return 0;
 318
 319	return filemap_fdatawait_range(mapping, 0, i_size - 1);
 320}
 321EXPORT_SYMBOL(filemap_fdatawait);
 322
 323int filemap_write_and_wait(struct address_space *mapping)
 324{
 325	int err = 0;
 326
 327	if (mapping->nrpages) {
 328		err = filemap_fdatawrite(mapping);
 329		/*
 330		 * Even if the above returned error, the pages may be
 331		 * written partially (e.g. -ENOSPC), so we wait for it.
 332		 * But -EIO is a special case; it may indicate the worst
 333		 * thing (e.g. bug) happened, so we avoid waiting for it.
 334		 */
 335		if (err != -EIO) {
 336			int err2 = filemap_fdatawait(mapping);
 337			if (!err)
 338				err = err2;
 339		}
 340	}
 341	return err;
 342}
 343EXPORT_SYMBOL(filemap_write_and_wait);
 344
 345/**
 346 * filemap_write_and_wait_range - write out & wait on a file range
 347 * @mapping:	the address_space for the pages
 348 * @lstart:	offset in bytes where the range starts
 349 * @lend:	offset in bytes where the range ends (inclusive)
 350 *
 351 * Write out and wait upon file offsets lstart->lend, inclusive.
 352 *
 353 * Note that `lend' is inclusive (describes the last byte to be written) so
 354 * that this function can be used to write to the very end-of-file (lend = -1).
 355 */
 356int filemap_write_and_wait_range(struct address_space *mapping,
 357				 loff_t lstart, loff_t lend)
 358{
 359	int err = 0;
 360
 361	if (mapping->nrpages) {
 362		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 363						 WB_SYNC_ALL);
 364		/* See comment of filemap_write_and_wait() */
 365		if (err != -EIO) {
 366			int err2 = filemap_fdatawait_range(mapping,
 367						lstart, lend);
 368			if (!err)
 369				err = err2;
 370		}
 371	}
 372	return err;
 373}
 374EXPORT_SYMBOL(filemap_write_and_wait_range);
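/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of the
 * common ->fsync shape; most filesystems start with exactly this call to
 * flush and wait on the affected range before writing their own metadata.
 * "myfs_fsync" is hypothetical.
 */
static int myfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	/* ... write out inode / metadata here ... */
	mutex_unlock(&inode->i_mutex);
	return 0;
}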
 375
 376/**
 377 * replace_page_cache_page - replace a pagecache page with a new one
 378 * @old:	page to be replaced
 379 * @new:	page to replace with
 380 * @gfp_mask:	allocation mode
 381 *
 382 * This function replaces a page in the pagecache with a new one.  On
 383 * success it acquires the pagecache reference for the new page and
 384 * drops it for the old page.  Both the old and new pages must be
 385 * locked.  This function does not add the new page to the LRU, the
 386 * caller must do that.
 387 *
 388 * The remove + add is atomic.  The only way this function can fail is
 389 * memory allocation failure.
 390 */
 391int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 392{
 393	int error;
 394
 395	VM_BUG_ON(!PageLocked(old));
 396	VM_BUG_ON(!PageLocked(new));
 397	VM_BUG_ON(new->mapping);
 398
 399	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 400	if (!error) {
 401		struct address_space *mapping = old->mapping;
 402		void (*freepage)(struct page *);
 403
 404		pgoff_t offset = old->index;
 405		freepage = mapping->a_ops->freepage;
 406
 407		page_cache_get(new);
 408		new->mapping = mapping;
 409		new->index = offset;
 410
 411		spin_lock_irq(&mapping->tree_lock);
 412		__delete_from_page_cache(old);
 413		error = radix_tree_insert(&mapping->page_tree, offset, new);
 414		BUG_ON(error);
 415		mapping->nrpages++;
 416		__inc_zone_page_state(new, NR_FILE_PAGES);
 417		if (PageSwapBacked(new))
 418			__inc_zone_page_state(new, NR_SHMEM);
 419		spin_unlock_irq(&mapping->tree_lock);
 420		/* mem_cgroup codes must not be called under tree_lock */
 421		mem_cgroup_replace_page_cache(old, new);
 422		radix_tree_preload_end();
 423		if (freepage)
 424			freepage(old);
 425		page_cache_release(old);
 426	}
 427
 428	return error;
 429}
 430EXPORT_SYMBOL_GPL(replace_page_cache_page);
 431
 432/**
 433 * add_to_page_cache_locked - add a locked page to the pagecache
 434 * @page:	page to add
 435 * @mapping:	the page's address_space
 436 * @offset:	page index
 437 * @gfp_mask:	page allocation mode
 438 *
 439 * This function is used to add a page to the pagecache; the page must be locked.
 440 * This function does not add the page to the LRU.  The caller must do that.
 441 */
 442int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 443		pgoff_t offset, gfp_t gfp_mask)
 444{
 445	int error;
 446
 447	VM_BUG_ON(!PageLocked(page));
 448	VM_BUG_ON(PageSwapBacked(page));
 449
 450	error = mem_cgroup_cache_charge(page, current->mm,
 451					gfp_mask & GFP_RECLAIM_MASK);
 452	if (error)
 453		goto out;
 454
 455	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 456	if (error == 0) {
 457		page_cache_get(page);
 458		page->mapping = mapping;
 459		page->index = offset;
 460
 461		spin_lock_irq(&mapping->tree_lock);
 462		error = radix_tree_insert(&mapping->page_tree, offset, page);
 463		if (likely(!error)) {
 464			mapping->nrpages++;
 465			__inc_zone_page_state(page, NR_FILE_PAGES);
 466			spin_unlock_irq(&mapping->tree_lock);
 467		} else {
 468			page->mapping = NULL;
 469			/* Leave page->index set: truncation relies upon it */
 470			spin_unlock_irq(&mapping->tree_lock);
 471			mem_cgroup_uncharge_cache_page(page);
 472			page_cache_release(page);
 473		}
 474		radix_tree_preload_end();
 475	} else
 476		mem_cgroup_uncharge_cache_page(page);
 477out:
 478	return error;
 479}
 480EXPORT_SYMBOL(add_to_page_cache_locked);
 481
 482int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 483				pgoff_t offset, gfp_t gfp_mask)
 484{
 485	int ret;
 486
 487	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 488	if (ret == 0)
 489		lru_cache_add_file(page);
 490	return ret;
 491}
 492EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 493
 494#ifdef CONFIG_NUMA
 495struct page *__page_cache_alloc(gfp_t gfp)
 496{
 497	int n;
 498	struct page *page;
 499
 500	if (cpuset_do_page_mem_spread()) {
 501		unsigned int cpuset_mems_cookie;
 502		do {
 503			cpuset_mems_cookie = get_mems_allowed();
 504			n = cpuset_mem_spread_node();
 505			page = alloc_pages_exact_node(n, gfp, 0);
 506		} while (!put_mems_allowed(cpuset_mems_cookie) && !page);
 507
 508		return page;
 509	}
 510	return alloc_pages(gfp, 0);
 511}
 512EXPORT_SYMBOL(__page_cache_alloc);
 513#endif
 514
 515/*
 516 * In order to wait for pages to become available there must be
 517 * waitqueues associated with pages. By using a hash table of
 518 * waitqueues where the bucket discipline is to maintain all
 519 * waiters on the same queue and wake all when any of the pages
 520 * become available, and for the woken contexts to check to be
 521 * sure the appropriate page became available, this saves space
 522 * at a cost of "thundering herd" phenomena during rare hash
 523 * collisions.
 524 */
 525static wait_queue_head_t *page_waitqueue(struct page *page)
 526{
 527	const struct zone *zone = page_zone(page);
 528
 529	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 530}
 531
 532static inline void wake_up_page(struct page *page, int bit)
 533{
 534	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 535}
 536
 537void wait_on_page_bit(struct page *page, int bit_nr)
 538{
 539	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 540
 541	if (test_bit(bit_nr, &page->flags))
 542		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
 543							TASK_UNINTERRUPTIBLE);
 544}
 545EXPORT_SYMBOL(wait_on_page_bit);
 546
 547int wait_on_page_bit_killable(struct page *page, int bit_nr)
 548{
 549	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 550
 551	if (!test_bit(bit_nr, &page->flags))
 552		return 0;
 553
 554	return __wait_on_bit(page_waitqueue(page), &wait,
 555			     sleep_on_page_killable, TASK_KILLABLE);
 556}
 557
 558/**
 559 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 560 * @page: Page defining the wait queue of interest
 561 * @waiter: Waiter to add to the queue
 562 *
 563 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 564 */
 565void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 566{
 567	wait_queue_head_t *q = page_waitqueue(page);
 568	unsigned long flags;
 569
 570	spin_lock_irqsave(&q->lock, flags);
 571	__add_wait_queue(q, waiter);
 572	spin_unlock_irqrestore(&q->lock, flags);
 573}
 574EXPORT_SYMBOL_GPL(add_page_wait_queue);
 575
 576/**
 577 * unlock_page - unlock a locked page
 578 * @page: the page
 579 *
 580 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 581 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 582 * mechanism between PageLocked pages and PageWriteback pages is shared.
 583 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 584 *
 585 * The mb is necessary to enforce ordering between the clear_bit and the read
 586 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 587 */
 588void unlock_page(struct page *page)
 589{
 590	VM_BUG_ON(!PageLocked(page));
 591	clear_bit_unlock(PG_locked, &page->flags);
 592	smp_mb__after_clear_bit();
 593	wake_up_page(page, PG_locked);
 594}
 595EXPORT_SYMBOL(unlock_page);
 596
 597/**
 598 * end_page_writeback - end writeback against a page
 599 * @page: the page
 600 */
 601void end_page_writeback(struct page *page)
 602{
 603	if (TestClearPageReclaim(page))
 604		rotate_reclaimable_page(page);
 605
 606	if (!test_clear_page_writeback(page))
 607		BUG();
 608
 609	smp_mb__after_clear_bit();
 610	wake_up_page(page, PG_writeback);
 611}
 612EXPORT_SYMBOL(end_page_writeback);
 613
 614/**
 615 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 616 * @page: the page to lock
 617 */
 618void __lock_page(struct page *page)
 619{
 620	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 621
 622	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
 623							TASK_UNINTERRUPTIBLE);
 624}
 625EXPORT_SYMBOL(__lock_page);
 626
 627int __lock_page_killable(struct page *page)
 628{
 629	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 630
 631	return __wait_on_bit_lock(page_waitqueue(page), &wait,
 632					sleep_on_page_killable, TASK_KILLABLE);
 633}
 634EXPORT_SYMBOL_GPL(__lock_page_killable);
 635
 636int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 637			 unsigned int flags)
 638{
 639	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 640		/*
 641		 * CAUTION! In this case, mmap_sem is not released
 642		 * even though 0 is returned.
 643		 */
 644		if (flags & FAULT_FLAG_RETRY_NOWAIT)
 645			return 0;
 646
 647		up_read(&mm->mmap_sem);
 648		if (flags & FAULT_FLAG_KILLABLE)
 649			wait_on_page_locked_killable(page);
 650		else
 651			wait_on_page_locked(page);
 652		return 0;
 653	} else {
 654		if (flags & FAULT_FLAG_KILLABLE) {
 655			int ret;
 656
 657			ret = __lock_page_killable(page);
 658			if (ret) {
 659				up_read(&mm->mmap_sem);
 660				return 0;
 661			}
 662		} else
 663			__lock_page(page);
 664		return 1;
 665	}
 666}
 667
 668/**
 669 * find_get_page - find and get a page reference
 670 * @mapping: the address_space to search
 671 * @offset: the page index
 672 *
 673 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 674 * If yes, increment its refcount and return it; if no, return NULL.
 675 */
 676struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
 677{
 678	void **pagep;
 679	struct page *page;
 680
 681	rcu_read_lock();
 682repeat:
 683	page = NULL;
 684	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
 685	if (pagep) {
 686		page = radix_tree_deref_slot(pagep);
 687		if (unlikely(!page))
 688			goto out;
 689		if (radix_tree_exception(page)) {
 690			if (radix_tree_deref_retry(page))
 691				goto repeat;
 692			/*
 693			 * Otherwise, shmem/tmpfs must be storing a swap entry
 694			 * here as an exceptional entry: so return it without
 695			 * attempting to raise page count.
 696			 */
 697			goto out;
 698		}
 699		if (!page_cache_get_speculative(page))
 700			goto repeat;
 701
 702		/*
 703		 * Has the page moved?
 704		 * This is part of the lockless pagecache protocol. See
 705		 * include/linux/pagemap.h for details.
 706		 */
 707		if (unlikely(page != *pagep)) {
 708			page_cache_release(page);
 709			goto repeat;
 710		}
 711	}
 712out:
 713	rcu_read_unlock();
 714
 715	return page;
 716}
 717EXPORT_SYMBOL(find_get_page);
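/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of probing
 * the cache without blocking.  It assumes a regular file mapping (on
 * shmem/tmpfs an exceptional swap entry could come back instead of a page,
 * see the comment above).  The reference find_get_page() takes must always
 * be dropped; "page_is_cached_uptodate" is hypothetical.
 */
static int page_is_cached_uptodate(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);
	int ret = 0;

	if (page) {
		ret = PageUptodate(page);
		page_cache_release(page);
	}
	return ret;
}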
 718
 719/**
 720 * find_lock_page - locate, pin and lock a pagecache page
 721 * @mapping: the address_space to search
 722 * @offset: the page index
 723 *
 724 * Locates the desired pagecache page, locks it, increments its reference
 725 * count and returns its address.
 726 *
 727 * Returns NULL if the page was not present. find_lock_page() may sleep.
 728 */
 729struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 730{
 731	struct page *page;
 732
 733repeat:
 734	page = find_get_page(mapping, offset);
 735	if (page && !radix_tree_exception(page)) {
 736		lock_page(page);
 737		/* Has the page been truncated? */
 738		if (unlikely(page->mapping != mapping)) {
 739			unlock_page(page);
 740			page_cache_release(page);
 741			goto repeat;
 742		}
 743		VM_BUG_ON(page->index != offset);
 744	}
 745	return page;
 746}
 747EXPORT_SYMBOL(find_lock_page);
 748
 749/**
 750 * find_or_create_page - locate or add a pagecache page
 751 * @mapping: the page's address_space
 752 * @index: the page's index into the mapping
 753 * @gfp_mask: page allocation mode
 754 *
 755 * Locates a page in the pagecache.  If the page is not present, a new page
 756 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 757 * LRU list.  The returned page is locked and has its reference count
 758 * incremented.
 759 *
 760 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 761 * allocation!
 762 *
 763 * find_or_create_page() returns the desired page's address, or NULL on
 764 * memory exhaustion.
 765 */
 766struct page *find_or_create_page(struct address_space *mapping,
 767		pgoff_t index, gfp_t gfp_mask)
 768{
 769	struct page *page;
 770	int err;
 771repeat:
 772	page = find_lock_page(mapping, index);
 773	if (!page) {
 774		page = __page_cache_alloc(gfp_mask);
 775		if (!page)
 776			return NULL;
 777		/*
 778		 * We want a regular kernel memory (not highmem or DMA etc)
 779		 * allocation for the radix tree nodes, but we need to honour
 780		 * the context-specific requirements the caller has asked for.
 781		 * GFP_RECLAIM_MASK collects those requirements.
 782		 */
 783		err = add_to_page_cache_lru(page, mapping, index,
 784			(gfp_mask & GFP_RECLAIM_MASK));
 785		if (unlikely(err)) {
 786			page_cache_release(page);
 787			page = NULL;
 788			if (err == -EEXIST)
 789				goto repeat;
 790		}
 791	}
 792	return page;
 793}
 794EXPORT_SYMBOL(find_or_create_page);
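/*
 * Editor's illustration (not part of filemap.c): a hedged sketch of getting
 * a locked pagecache page for metadata-style use, much like the
 * grab_cache_page() wrapper in <linux/pagemap.h>.  The page comes back
 * locked with an elevated refcount and is not necessarily uptodate;
 * "get_meta_page" is hypothetical.
 */
static struct page *get_meta_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
	if (!page)
		return NULL;

	if (!PageUptodate(page)) {
		/* the caller would read or zero the page contents here */
	}
	return page;	/* caller unlocks and releases the page when done */
}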
 795
 796/**
 797 * find_get_pages - gang pagecache lookup
 798 * @mapping:	The address_space to search
 799 * @start:	The starting page index
 800 * @nr_pages:	The maximum number of pages
 801 * @pages:	Where the resulting pages are placed
 802 *
 803 * find_get_pages() will search for and return a group of up to
 804 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 805 * find_get_pages() takes a reference against the returned pages.
 806 *
 807 * The search returns a group of mapping-contiguous pages with ascending
 808 * indexes.  There may be holes in the indices due to not-present pages.
 809 *
 810 * find_get_pages() returns the number of pages which were found.
 811 */
 812unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 813			    unsigned int nr_pages, struct page **pages)
 814{
 815	struct radix_tree_iter iter;
 816	void **slot;
 817	unsigned ret = 0;
 818
 819	if (unlikely(!nr_pages))
 820		return 0;
 821
 822	rcu_read_lock();
 823restart:
 824	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
 825		struct page *page;
 826repeat:
 827		page = radix_tree_deref_slot(slot);
 828		if (unlikely(!page))
 829			continue;
 830
 831		if (radix_tree_exception(page)) {
 832			if (radix_tree_deref_retry(page)) {
 833				/*
 834				 * Transient condition which can only trigger
 835				 * when entry at index 0 moves out of or back
 836				 * to root: none yet gotten, safe to restart.
 837				 */
 838				WARN_ON(iter.index);
 839				goto restart;
 840			}
 841			/*
 842			 * Otherwise, shmem/tmpfs must be storing a swap entry
 843			 * here as an exceptional entry: so skip over it -
 844			 * we only reach this from invalidate_mapping_pages().
 845			 */
 
 846			continue;
 847		}
 848
 849		if (!page_cache_get_speculative(page))
 850			goto repeat;
 851
 852		/* Has the page moved? */
 853		if (unlikely(page != *slot)) {
 854			page_cache_release(page);
 855			goto repeat;
 856		}
 857
 858		pages[ret] = page;
 859		if (++ret == nr_pages)
 860			break;
 861	}
 862
 863	rcu_read_unlock();
 864	return ret;
 865}
 866
 867/**
 868 * find_get_pages_contig - gang contiguous pagecache lookup
 869 * @mapping:	The address_space to search
 870 * @index:	The starting page index
 871 * @nr_pages:	The maximum number of pages
 872 * @pages:	Where the resulting pages are placed
 873 *
 874 * find_get_pages_contig() works exactly like find_get_pages(), except
 875 * that the returned number of pages are guaranteed to be contiguous.
 876 *
 877 * find_get_pages_contig() returns the number of pages which were found.
 878 */
 879unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 880			       unsigned int nr_pages, struct page **pages)
 881{
 882	struct radix_tree_iter iter;
 883	void **slot;
 884	unsigned int ret = 0;
 885
 886	if (unlikely(!nr_pages))
 887		return 0;
 888
 889	rcu_read_lock();
 890restart:
 891	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
 892		struct page *page;
 893repeat:
 894		page = radix_tree_deref_slot(slot);
 895		/* A hole - there is no reason to continue */
 896		if (unlikely(!page))
 897			break;
 898
 899		if (radix_tree_exception(page)) {
 900			if (radix_tree_deref_retry(page)) {
 901				/*
 902				 * Transient condition which can only trigger
 903				 * when entry at index 0 moves out of or back
 904				 * to root: none yet gotten, safe to restart.
 905				 */
 906				goto restart;
 907			}
 908			/*
 909			 * Otherwise, shmem/tmpfs must be storing a swap entry
 910			 * here as an exceptional entry: so stop looking for
 911			 * contiguous pages.
 912			 */
 913			break;
 914		}
 915
 916		if (!page_cache_get_speculative(page))
 917			goto repeat;
 918
 919		/* Has the page moved? */
 920		if (unlikely(page != *slot)) {
 921			page_cache_release(page);
 922			goto repeat;
 923		}
 924
 925		/*
 926		 * We must check mapping and index after taking the ref;
 927		 * otherwise we can get both false positives and false
 928		 * negatives, which is just confusing to the caller.
 929		 */
 930		if (page->mapping == NULL || page->index != iter.index) {
 931			page_cache_release(page);
 932			break;
 933		}
 934
 935		pages[ret] = page;
 936		if (++ret == nr_pages)
 937			break;
 938	}
 939	rcu_read_unlock();
 940	return ret;
 941}
 942EXPORT_SYMBOL(find_get_pages_contig);
 943
 944/**
 945 * find_get_pages_tag - find and return pages that match @tag
 946 * @mapping:	the address_space to search
 947 * @index:	the starting page index
 948 * @tag:	the tag index
 949 * @nr_pages:	the maximum number of pages
 950 * @pages:	where the resulting pages are placed
 951 *
 952 * Like find_get_pages, except we only return pages which are tagged with
 953 * @tag.   We update @index to index the next page for the traversal.
 954 */
 955unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 956			int tag, unsigned int nr_pages, struct page **pages)
 957{
 958	struct radix_tree_iter iter;
 959	void **slot;
 960	unsigned ret = 0;
 961
 962	if (unlikely(!nr_pages))
 963		return 0;
 964
 965	rcu_read_lock();
 966restart:
 967	radix_tree_for_each_tagged(slot, &mapping->page_tree,
 968				   &iter, *index, tag) {
 969		struct page *page;
 970repeat:
 971		page = radix_tree_deref_slot(slot);
 972		if (unlikely(!page))
 973			continue;
 974
 975		if (radix_tree_exception(page)) {
 976			if (radix_tree_deref_retry(page)) {
 977				/*
 978				 * Transient condition which can only trigger
 979				 * when entry at index 0 moves out of or back
 980				 * to root: none yet gotten, safe to restart.
 981				 */
 982				goto restart;
 983			}
 984			/*
 985			 * This function is never used on a shmem/tmpfs
 986			 * mapping, so a swap entry won't be found here.
 987			 */
 988			BUG();
 989		}
 990
 991		if (!page_cache_get_speculative(page))
 992			goto repeat;
 993
 994		/* Has the page moved? */
 995		if (unlikely(page != *slot)) {
 996			page_cache_release(page);
 997			goto repeat;
 998		}
 999
1000		pages[ret] = page;
1001		if (++ret == nr_pages)
1002			break;
1003	}
1004
1005	rcu_read_unlock();
1006
1007	if (ret)
1008		*index = pages[ret - 1]->index + 1;
1009
1010	return ret;
1011}
1012EXPORT_SYMBOL(find_get_pages_tag);
1013
1014/**
1015 * grab_cache_page_nowait - returns locked page at given index in given cache
1016 * @mapping: target address_space
1017 * @index: the page index
1018 *
1019 * Same as grab_cache_page(), but do not wait if the page is unavailable.
1020 * This is intended for speculative data generators, where the data can
1021 * be regenerated if the page couldn't be grabbed.  This routine should
1022 * be safe to call while holding the lock for another page.
1023 *
1024 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
1025 * and deadlock against the caller's locked page.
1026 */
1027struct page *
1028grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
1029{
1030	struct page *page = find_get_page(mapping, index);
1031
1032	if (page) {
1033		if (trylock_page(page))
1034			return page;
1035		page_cache_release(page);
1036		return NULL;
1037	}
1038	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
1039	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
1040		page_cache_release(page);
1041		page = NULL;
1042	}
1043	return page;
1044}
1045EXPORT_SYMBOL(grab_cache_page_nowait);
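
/*
 * Editor's sketch (not part of the original file): how a speculative data
 * generator might use grab_cache_page_nowait().  If NULL comes back the
 * caller simply regenerates the data later; nothing ever blocks.  The
 * helper name and the zero-fill standing in for "generated data" are
 * illustrative only.
 */
static void example_try_cache_generated_data(struct address_space *mapping,
					     pgoff_t index)
{
	struct page *page = grab_cache_page_nowait(mapping, index);
	char *kaddr;

	if (!page)
		return;		/* page busy or no memory: try again later */

	/* page is locked and in the page cache here */
	if (!PageUptodate(page)) {
		kaddr = kmap_atomic(page);
		memset(kaddr, 0, PAGE_CACHE_SIZE);	/* "generate" the data */
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	unlock_page(page);
	page_cache_release(page);
}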
1046
1047/*
1048 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1049 * a _large_ part of the i/o request. Imagine the worst scenario:
1050 *
1051 *      ---R__________________________________________B__________
1052 *         ^ reading here                             ^ bad block(assume 4k)
1053 *
1054 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1055 * => failing the whole request => read(R) => read(R+1) =>
1056 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1057 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1058 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1059 *
1060 * It is going insane. Fix it by quickly scaling down the readahead size.
1061 */
1062static void shrink_readahead_size_eio(struct file *filp,
1063					struct file_ra_state *ra)
1064{
1065	ra->ra_pages /= 4;
1066}
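
/*
 * Editor's note (not part of the original file): with a typical default
 * window of 32 pages (128kB of 4k pages), repeated media errors shrink
 * the window as 32 -> 8 -> 2 -> 0, so after a few failures readahead is
 * effectively disabled for this file and only the page actually being
 * requested is read.
 */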
1067
1068/**
1069 * do_generic_file_read - generic file read routine
1070 * @filp:	the file to read
1071 * @ppos:	current file position
1072 * @desc:	read_descriptor
1073 * @actor:	read method
1074 *
1075 * This is a generic file read routine, and uses the
1076 * mapping->a_ops->readpage() function for the actual low-level stuff.
1077 *
1078 * This is really ugly. But the goto's actually try to clarify some
1079 * of the logic when it comes to error handling etc.
1080 */
1081static void do_generic_file_read(struct file *filp, loff_t *ppos,
1082		read_descriptor_t *desc, read_actor_t actor)
1083{
1084	struct address_space *mapping = filp->f_mapping;
1085	struct inode *inode = mapping->host;
1086	struct file_ra_state *ra = &filp->f_ra;
1087	pgoff_t index;
1088	pgoff_t last_index;
1089	pgoff_t prev_index;
1090	unsigned long offset;      /* offset into pagecache page */
1091	unsigned int prev_offset;
1092	int error;
1093
1094	index = *ppos >> PAGE_CACHE_SHIFT;
1095	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
1096	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
1097	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
1098	offset = *ppos & ~PAGE_CACHE_MASK;
1099
1100	for (;;) {
1101		struct page *page;
1102		pgoff_t end_index;
1103		loff_t isize;
1104		unsigned long nr, ret;
1105
1106		cond_resched();
1107find_page:
1108		page = find_get_page(mapping, index);
1109		if (!page) {
1110			page_cache_sync_readahead(mapping,
1111					ra, filp,
1112					index, last_index - index);
1113			page = find_get_page(mapping, index);
1114			if (unlikely(page == NULL))
1115				goto no_cached_page;
1116		}
1117		if (PageReadahead(page)) {
1118			page_cache_async_readahead(mapping,
1119					ra, filp, page,
1120					index, last_index - index);
1121		}
1122		if (!PageUptodate(page)) {
1123			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
1124					!mapping->a_ops->is_partially_uptodate)
1125				goto page_not_up_to_date;
1126			if (!trylock_page(page))
1127				goto page_not_up_to_date;
1128			/* Did it get truncated before we got the lock? */
1129			if (!page->mapping)
1130				goto page_not_up_to_date_locked;
1131			if (!mapping->a_ops->is_partially_uptodate(page,
1132								desc, offset))
1133				goto page_not_up_to_date_locked;
1134			unlock_page(page);
1135		}
1136page_ok:
1137		/*
1138		 * i_size must be checked after we know the page is Uptodate.
1139		 *
1140		 * Checking i_size after the Uptodate check allows us to calculate
1141		 * the correct value for "nr", which means the zero-filled
1142		 * part of the page is not copied back to userspace (unless
1143		 * another truncate extends the file - this is desired though).
1144		 */
1145
1146		isize = i_size_read(inode);
1147		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1148		if (unlikely(!isize || index > end_index)) {
1149			page_cache_release(page);
1150			goto out;
1151		}
1152
1153		/* nr is the maximum number of bytes to copy from this page */
1154		nr = PAGE_CACHE_SIZE;
1155		if (index == end_index) {
1156			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1157			if (nr <= offset) {
1158				page_cache_release(page);
1159				goto out;
1160			}
1161		}
1162		nr = nr - offset;
1163
1164		/* If users can be writing to this page using arbitrary
1165		 * virtual addresses, take care about potential aliasing
1166		 * before reading the page on the kernel side.
1167		 */
1168		if (mapping_writably_mapped(mapping))
1169			flush_dcache_page(page);
1170
1171		/*
1172		 * When a sequential read accesses a page several times,
1173		 * only mark it as accessed the first time.
1174		 */
1175		if (prev_index != index || offset != prev_offset)
1176			mark_page_accessed(page);
1177		prev_index = index;
1178
1179		/*
1180		 * Ok, we have the page, and it's up-to-date, so
1181		 * now we can copy it to user space...
1182		 *
1183		 * The actor routine returns how many bytes were actually used..
1184		 * NOTE! This may not be the same as how much of a user buffer
1185		 * we filled up (we may be padding etc), so we can only update
1186		 * "pos" here (the actor routine has to update the user buffer
1187		 * pointers and the remaining count).
1188		 */
1189		ret = actor(desc, page, offset, nr);
1190		offset += ret;
1191		index += offset >> PAGE_CACHE_SHIFT;
1192		offset &= ~PAGE_CACHE_MASK;
1193		prev_offset = offset;
1194
1195		page_cache_release(page);
1196		if (ret == nr && desc->count)
1197			continue;
1198		goto out;
1199
1200page_not_up_to_date:
1201		/* Get exclusive access to the page ... */
1202		error = lock_page_killable(page);
1203		if (unlikely(error))
1204			goto readpage_error;
1205
1206page_not_up_to_date_locked:
1207		/* Did it get truncated before we got the lock? */
1208		if (!page->mapping) {
1209			unlock_page(page);
1210			page_cache_release(page);
1211			continue;
1212		}
1213
1214		/* Did somebody else fill it already? */
1215		if (PageUptodate(page)) {
1216			unlock_page(page);
1217			goto page_ok;
1218		}
1219
1220readpage:
1221		/*
1222		 * A previous I/O error may have been due to temporary
1223		 * failures, eg. multipath errors.
1224		 * PG_error will be set again if readpage fails.
1225		 */
1226		ClearPageError(page);
1227		/* Start the actual read. The read will unlock the page. */
1228		error = mapping->a_ops->readpage(filp, page);
1229
1230		if (unlikely(error)) {
1231			if (error == AOP_TRUNCATED_PAGE) {
1232				page_cache_release(page);
1233				goto find_page;
1234			}
1235			goto readpage_error;
1236		}
1237
1238		if (!PageUptodate(page)) {
1239			error = lock_page_killable(page);
1240			if (unlikely(error))
1241				goto readpage_error;
1242			if (!PageUptodate(page)) {
1243				if (page->mapping == NULL) {
1244					/*
1245					 * invalidate_mapping_pages got it
1246					 */
1247					unlock_page(page);
1248					page_cache_release(page);
1249					goto find_page;
1250				}
1251				unlock_page(page);
1252				shrink_readahead_size_eio(filp, ra);
1253				error = -EIO;
1254				goto readpage_error;
1255			}
1256			unlock_page(page);
1257		}
1258
1259		goto page_ok;
1260
1261readpage_error:
1262		/* UHHUH! A synchronous read error occurred. Report it */
1263		desc->error = error;
1264		page_cache_release(page);
1265		goto out;
1266
1267no_cached_page:
1268		/*
1269		 * Ok, it wasn't cached, so we need to create a new
1270		 * page..
1271		 */
1272		page = page_cache_alloc_cold(mapping);
1273		if (!page) {
1274			desc->error = -ENOMEM;
1275			goto out;
1276		}
1277		error = add_to_page_cache_lru(page, mapping,
1278						index, GFP_KERNEL);
1279		if (error) {
1280			page_cache_release(page);
1281			if (error == -EEXIST)
1282				goto find_page;
1283			desc->error = error;
1284			goto out;
1285		}
1286		goto readpage;
1287	}
1288
1289out:
1290	ra->prev_pos = prev_index;
1291	ra->prev_pos <<= PAGE_CACHE_SHIFT;
1292	ra->prev_pos |= prev_offset;
1293
1294	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1295	file_accessed(filp);
1296}
1297
1298int file_read_actor(read_descriptor_t *desc, struct page *page,
1299			unsigned long offset, unsigned long size)
1300{
1301	char *kaddr;
1302	unsigned long left, count = desc->count;
1303
1304	if (size > count)
1305		size = count;
1306
1307	/*
1308	 * Faults on the destination of a read are common, so do it before
1309	 * taking the kmap.
1310	 */
1311	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1312		kaddr = kmap_atomic(page);
1313		left = __copy_to_user_inatomic(desc->arg.buf,
1314						kaddr + offset, size);
1315		kunmap_atomic(kaddr);
1316		if (left == 0)
1317			goto success;
1318	}
1319
1320	/* Do it the slow way */
1321	kaddr = kmap(page);
1322	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1323	kunmap(page);
1324
1325	if (left) {
1326		size -= left;
1327		desc->error = -EFAULT;
1328	}
1329success:
1330	desc->count = count - size;
1331	desc->written += size;
1332	desc->arg.buf += size;
1333	return size;
1334}
1335
1336/*
1337 * Performs necessary checks before doing a write
1338 * @iov:	io vector request
1339 * @nr_segs:	number of segments in the iovec
1340 * @count:	number of bytes to write
1341 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1342 *
1343 * Adjusts the number of segments and the number of bytes to write (nr_segs
1344 * must be properly initialized first). Returns an appropriate error code that
1345 * the caller should return, or zero if the write should be allowed.
1346 */
1347int generic_segment_checks(const struct iovec *iov,
1348			unsigned long *nr_segs, size_t *count, int access_flags)
1349{
1350	unsigned long   seg;
1351	size_t cnt = 0;
1352	for (seg = 0; seg < *nr_segs; seg++) {
1353		const struct iovec *iv = &iov[seg];
1354
1355		/*
1356		 * If any segment has a negative length, or the cumulative
1357		 * length ever wraps negative then return -EINVAL.
1358		 */
1359		cnt += iv->iov_len;
1360		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1361			return -EINVAL;
1362		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1363			continue;
1364		if (seg == 0)
1365			return -EFAULT;
1366		*nr_segs = seg;
1367		cnt -= iv->iov_len;	/* This segment is no good */
1368		break;
1369	}
1370	*count = cnt;
1371	return 0;
1372}
1373EXPORT_SYMBOL(generic_segment_checks);
1374
1375/**
1376 * generic_file_aio_read - generic filesystem read routine
1377 * @iocb:	kernel I/O control block
1378 * @iov:	io vector request
1379 * @nr_segs:	number of segments in the iovec
1380 * @pos:	current file position
1381 *
1382 * This is the "read()" routine for all filesystems
1383 * that can use the page cache directly.
1384 */
1385ssize_t
1386generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1387		unsigned long nr_segs, loff_t pos)
1388{
1389	struct file *filp = iocb->ki_filp;
1390	ssize_t retval;
1391	unsigned long seg = 0;
1392	size_t count;
1393	loff_t *ppos = &iocb->ki_pos;
1394
1395	count = 0;
1396	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1397	if (retval)
1398		return retval;
1399
1400	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1401	if (filp->f_flags & O_DIRECT) {
1402		loff_t size;
1403		struct address_space *mapping;
1404		struct inode *inode;
1405
1406		mapping = filp->f_mapping;
1407		inode = mapping->host;
1408		if (!count)
1409			goto out; /* skip atime */
1410		size = i_size_read(inode);
1411		if (pos < size) {
1412			retval = filemap_write_and_wait_range(mapping, pos,
1413					pos + iov_length(iov, nr_segs) - 1);
1414			if (!retval) {
1415				struct blk_plug plug;
1416
1417				blk_start_plug(&plug);
1418				retval = mapping->a_ops->direct_IO(READ, iocb,
1419							iov, pos, nr_segs);
1420				blk_finish_plug(&plug);
1421			}
1422			if (retval > 0) {
1423				*ppos = pos + retval;
1424				count -= retval;
1425			}
1426
1427			/*
1428			 * Btrfs can have a short DIO read if we encounter
1429			 * compressed extents, so if there was an error, or if
1430			 * we've already read everything we wanted to, or if
1431			 * there was a short read because we hit EOF, go ahead
1432			 * and return.  Otherwise fallthrough to buffered io for
1433			 * the rest of the read.
1434			 */
1435			if (retval < 0 || !count || *ppos >= size) {
1436				file_accessed(filp);
1437				goto out;
1438			}
1439		}
1440	}
1441
1442	count = retval;
1443	for (seg = 0; seg < nr_segs; seg++) {
1444		read_descriptor_t desc;
1445		loff_t offset = 0;
1446
1447		/*
1448		 * If we did a short DIO read we need to skip the section of the
1449		 * iov that we've already read data into.
1450		 */
1451		if (count) {
1452			if (count > iov[seg].iov_len) {
1453				count -= iov[seg].iov_len;
1454				continue;
1455			}
1456			offset = count;
1457			count = 0;
1458		}
1459
1460		desc.written = 0;
1461		desc.arg.buf = iov[seg].iov_base + offset;
1462		desc.count = iov[seg].iov_len - offset;
1463		if (desc.count == 0)
1464			continue;
1465		desc.error = 0;
1466		do_generic_file_read(filp, ppos, &desc, file_read_actor);
1467		retval += desc.written;
1468		if (desc.error) {
1469			retval = retval ?: desc.error;
1470			break;
1471		}
1472		if (desc.count > 0)
1473			break;
1474	}
1475out:
1476	return retval;
1477}
1478EXPORT_SYMBOL(generic_file_aio_read);
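
/*
 * Editor's sketch (not part of the original file): how a simple
 * filesystem of this era typically wires the generic read path (and the
 * matching write and mmap helpers defined later in this file) into its
 * file_operations.  "examplefs" is a hypothetical name.
 */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,		/* synchronous wrappers ... */
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,	/* ... around these */
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
};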
1479
1480#ifdef CONFIG_MMU
1481/**
1482 * page_cache_read - adds requested page to the page cache if not already there
1483 * @file:	file to read
1484 * @offset:	page index
1485 *
1486 * This adds the requested page to the page cache if it isn't already there,
1487 * and schedules an I/O to read in its contents from disk.
1488 */
1489static int page_cache_read(struct file *file, pgoff_t offset)
1490{
1491	struct address_space *mapping = file->f_mapping;
1492	struct page *page; 
1493	int ret;
1494
1495	do {
1496		page = page_cache_alloc_cold(mapping);
1497		if (!page)
1498			return -ENOMEM;
1499
1500		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1501		if (ret == 0)
1502			ret = mapping->a_ops->readpage(file, page);
1503		else if (ret == -EEXIST)
1504			ret = 0; /* losing race to add is OK */
1505
1506		page_cache_release(page);
1507
1508	} while (ret == AOP_TRUNCATED_PAGE);
1509		
1510	return ret;
1511}
1512
1513#define MMAP_LOTSAMISS  (100)
1514
1515/*
1516 * Synchronous readahead happens when we don't even find
1517 * a page in the page cache at all.
1518 */
1519static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1520				   struct file_ra_state *ra,
1521				   struct file *file,
1522				   pgoff_t offset)
1523{
1524	unsigned long ra_pages;
1525	struct address_space *mapping = file->f_mapping;
1526
1527	/* If we don't want any read-ahead, don't bother */
1528	if (VM_RandomReadHint(vma))
1529		return;
1530	if (!ra->ra_pages)
1531		return;
1532
1533	if (VM_SequentialReadHint(vma)) {
1534		page_cache_sync_readahead(mapping, ra, file, offset,
1535					  ra->ra_pages);
1536		return;
1537	}
1538
1539	/* Avoid banging the cache line if not needed */
1540	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1541		ra->mmap_miss++;
1542
1543	/*
1544	 * Do we miss much more than hit in this file? If so,
1545	 * stop bothering with read-ahead. It will only hurt.
1546	 */
1547	if (ra->mmap_miss > MMAP_LOTSAMISS)
1548		return;
1549
1550	/*
1551	 * mmap read-around
1552	 */
1553	ra_pages = max_sane_readahead(ra->ra_pages);
1554	ra->start = max_t(long, 0, offset - ra_pages / 2);
1555	ra->size = ra_pages;
1556	ra->async_size = ra_pages / 4;
1557	ra_submit(ra, mapping, file);
1558}
1559
1560/*
1561 * Asynchronous readahead happens when we find the page and PG_readahead,
1562 * so we want to possibly extend the readahead further..
1563 */
1564static void do_async_mmap_readahead(struct vm_area_struct *vma,
1565				    struct file_ra_state *ra,
1566				    struct file *file,
1567				    struct page *page,
1568				    pgoff_t offset)
1569{
1570	struct address_space *mapping = file->f_mapping;
1571
1572	/* If we don't want any read-ahead, don't bother */
1573	if (VM_RandomReadHint(vma))
1574		return;
1575	if (ra->mmap_miss > 0)
1576		ra->mmap_miss--;
1577	if (PageReadahead(page))
1578		page_cache_async_readahead(mapping, ra, file,
1579					   page, offset, ra->ra_pages);
1580}
1581
1582/**
1583 * filemap_fault - read in file data for page fault handling
1584 * @vma:	vma in which the fault was taken
1585 * @vmf:	struct vm_fault containing details of the fault
1586 *
1587 * filemap_fault() is invoked via the vma operations vector for a
1588 * mapped memory region to read in file data during a page fault.
1589 *
1590 * The goto's are kind of ugly, but this streamlines the normal case of having
1591 * it in the page cache, and handles the special cases reasonably without
1592 * having a lot of duplicated code.
1593 */
1594int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1595{
1596	int error;
1597	struct file *file = vma->vm_file;
1598	struct address_space *mapping = file->f_mapping;
1599	struct file_ra_state *ra = &file->f_ra;
1600	struct inode *inode = mapping->host;
1601	pgoff_t offset = vmf->pgoff;
1602	struct page *page;
1603	pgoff_t size;
1604	int ret = 0;
1605
1606	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1607	if (offset >= size)
1608		return VM_FAULT_SIGBUS;
1609
1610	/*
1611	 * Do we have something in the page cache already?
1612	 */
1613	page = find_get_page(mapping, offset);
1614	if (likely(page)) {
1615		/*
1616		 * We found the page, so try async readahead before
1617		 * waiting for the lock.
1618		 */
1619		do_async_mmap_readahead(vma, ra, file, page, offset);
1620	} else {
1621		/* No page in the page cache at all */
1622		do_sync_mmap_readahead(vma, ra, file, offset);
1623		count_vm_event(PGMAJFAULT);
1624		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1625		ret = VM_FAULT_MAJOR;
1626retry_find:
1627		page = find_get_page(mapping, offset);
1628		if (!page)
1629			goto no_cached_page;
1630	}
1631
1632	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1633		page_cache_release(page);
1634		return ret | VM_FAULT_RETRY;
1635	}
1636
1637	/* Did it get truncated? */
1638	if (unlikely(page->mapping != mapping)) {
1639		unlock_page(page);
1640		put_page(page);
1641		goto retry_find;
1642	}
1643	VM_BUG_ON(page->index != offset);
1644
1645	/*
1646	 * We have a locked page in the page cache, now we need to check
1647	 * that it's up-to-date. If not, it is going to be due to an error.
1648	 */
1649	if (unlikely(!PageUptodate(page)))
1650		goto page_not_uptodate;
1651
1652	/*
1653	 * Found the page and have a reference on it.
1654	 * We must recheck i_size under page lock.
1655	 */
1656	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1657	if (unlikely(offset >= size)) {
1658		unlock_page(page);
1659		page_cache_release(page);
1660		return VM_FAULT_SIGBUS;
1661	}
1662
1663	vmf->page = page;
1664	return ret | VM_FAULT_LOCKED;
1665
1666no_cached_page:
1667	/*
1668	 * We're only likely to ever get here if MADV_RANDOM is in
1669	 * effect.
1670	 */
1671	error = page_cache_read(file, offset);
1672
1673	/*
1674	 * The page we want has now been added to the page cache.
1675	 * In the unlikely event that someone removed it in the
1676	 * meantime, we'll just come back here and read it again.
1677	 */
1678	if (error >= 0)
1679		goto retry_find;
1680
1681	/*
1682	 * An error return from page_cache_read can result if the
1683	 * system is low on memory, or a problem occurs while trying
1684	 * to schedule I/O.
1685	 */
1686	if (error == -ENOMEM)
1687		return VM_FAULT_OOM;
1688	return VM_FAULT_SIGBUS;
1689
1690page_not_uptodate:
1691	/*
1692	 * Umm, take care of errors if the page isn't up-to-date.
1693	 * Try to re-read it _once_. We do this synchronously,
1694	 * because there really aren't any performance issues here
1695	 * and we need to check for errors.
1696	 */
1697	ClearPageError(page);
1698	error = mapping->a_ops->readpage(file, page);
1699	if (!error) {
1700		wait_on_page_locked(page);
1701		if (!PageUptodate(page))
1702			error = -EIO;
1703	}
1704	page_cache_release(page);
1705
1706	if (!error || error == AOP_TRUNCATED_PAGE)
1707		goto retry_find;
1708
1709	/* Things didn't work out. Return zero to tell the mm layer so. */
1710	shrink_readahead_size_eio(file, ra);
1711	return VM_FAULT_SIGBUS;
1712}
1713EXPORT_SYMBOL(filemap_fault);
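
/*
 * Editor's sketch (not part of the original file): a filesystem that must
 * intercept write faults (to reserve blocks, for instance) still reuses
 * filemap_fault() for the read side and only supplies its own
 * ->page_mkwrite; its ->mmap method would install these ops instead of
 * generic_file_vm_ops below.  The examplefs_* names are hypothetical and
 * the handler only deals with the truncate race.
 */
static int examplefs_page_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/* a real filesystem would reserve blocks or start a transaction here */
	lock_page(page);
	if (page->mapping != vma->vm_file->f_mapping) {
		/* raced with truncate: have the caller retry the fault */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	return VM_FAULT_LOCKED;	/* page is returned locked */
}

static const struct vm_operations_struct examplefs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= examplefs_page_mkwrite,
};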
1714
1715const struct vm_operations_struct generic_file_vm_ops = {
1716	.fault		= filemap_fault,
1717};
1718
1719/* This is used for a general mmap of a disk file */
1720
1721int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1722{
1723	struct address_space *mapping = file->f_mapping;
1724
1725	if (!mapping->a_ops->readpage)
1726		return -ENOEXEC;
1727	file_accessed(file);
1728	vma->vm_ops = &generic_file_vm_ops;
1729	vma->vm_flags |= VM_CAN_NONLINEAR;
1730	return 0;
1731}
1732
1733/*
1734 * This is for filesystems which do not implement ->writepage.
1735 */
1736int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1737{
1738	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1739		return -EINVAL;
1740	return generic_file_mmap(file, vma);
1741}
1742#else
1743int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1744{
1745	return -ENOSYS;
1746}
1747int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1748{
1749	return -ENOSYS;
1750}
1751#endif /* CONFIG_MMU */
1752
1753EXPORT_SYMBOL(generic_file_mmap);
1754EXPORT_SYMBOL(generic_file_readonly_mmap);
1755
1756static struct page *__read_cache_page(struct address_space *mapping,
1757				pgoff_t index,
1758				int (*filler)(void *, struct page *),
1759				void *data,
1760				gfp_t gfp)
1761{
1762	struct page *page;
1763	int err;
1764repeat:
1765	page = find_get_page(mapping, index);
1766	if (!page) {
1767		page = __page_cache_alloc(gfp | __GFP_COLD);
1768		if (!page)
1769			return ERR_PTR(-ENOMEM);
1770		err = add_to_page_cache_lru(page, mapping, index, gfp);
1771		if (unlikely(err)) {
1772			page_cache_release(page);
1773			if (err == -EEXIST)
1774				goto repeat;
1775			/* Presumably ENOMEM for radix tree node */
1776			return ERR_PTR(err);
1777		}
1778		err = filler(data, page);
1779		if (err < 0) {
1780			page_cache_release(page);
1781			page = ERR_PTR(err);
1782		}
1783	}
1784	return page;
1785}
1786
1787static struct page *do_read_cache_page(struct address_space *mapping,
1788				pgoff_t index,
1789				int (*filler)(void *, struct page *),
1790				void *data,
1791				gfp_t gfp)
1792
1793{
1794	struct page *page;
1795	int err;
1796
1797retry:
1798	page = __read_cache_page(mapping, index, filler, data, gfp);
1799	if (IS_ERR(page))
1800		return page;
1801	if (PageUptodate(page))
1802		goto out;
1803
1804	lock_page(page);
1805	if (!page->mapping) {
1806		unlock_page(page);
1807		page_cache_release(page);
1808		goto retry;
1809	}
1810	if (PageUptodate(page)) {
1811		unlock_page(page);
1812		goto out;
1813	}
1814	err = filler(data, page);
1815	if (err < 0) {
1816		page_cache_release(page);
1817		return ERR_PTR(err);
1818	}
1819out:
1820	mark_page_accessed(page);
1821	return page;
1822}
1823
1824/**
1825 * read_cache_page_async - read into page cache, fill it if needed
1826 * @mapping:	the page's address_space
1827 * @index:	the page index
1828 * @filler:	function to perform the read
1829 * @data:	first arg to filler(data, page) function, often left as NULL
1830 *
1831 * Same as read_cache_page, but don't wait for page to become unlocked
1832 * after submitting it to the filler.
1833 *
1834 * Read into the page cache. If a page already exists, and PageUptodate() is
1835 * not set, try to fill the page but don't wait for it to become unlocked.
1836 *
1837 * If the page does not get brought uptodate, return -EIO.
1838 */
1839struct page *read_cache_page_async(struct address_space *mapping,
1840				pgoff_t index,
1841				int (*filler)(void *, struct page *),
1842				void *data)
1843{
1844	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
1845}
1846EXPORT_SYMBOL(read_cache_page_async);
1847
1848static struct page *wait_on_page_read(struct page *page)
1849{
1850	if (!IS_ERR(page)) {
1851		wait_on_page_locked(page);
1852		if (!PageUptodate(page)) {
1853			page_cache_release(page);
1854			page = ERR_PTR(-EIO);
1855		}
1856	}
1857	return page;
1858}
1859
1860/**
1861 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
1862 * @mapping:	the page's address_space
1863 * @index:	the page index
1864 * @gfp:	the page allocator flags to use if allocating
1865 *
1866 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
1867 * any new page allocations done using the specified allocation flags.
1868 *
1869 * If the page does not get brought uptodate, return -EIO.
1870 */
1871struct page *read_cache_page_gfp(struct address_space *mapping,
1872				pgoff_t index,
1873				gfp_t gfp)
1874{
1875	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
1876
1877	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
1878}
1879EXPORT_SYMBOL(read_cache_page_gfp);
1880
1881/**
1882 * read_cache_page - read into page cache, fill it if needed
1883 * @mapping:	the page's address_space
1884 * @index:	the page index
1885 * @filler:	function to perform the read
1886 * @data:	first arg to filler(data, page) function, often left as NULL
1887 *
1888 * Read into the page cache. If a page already exists, and PageUptodate() is
1889 * not set, try to fill the page then wait for it to become unlocked.
1890 *
1891 * If the page does not get brought uptodate, return -EIO.
1892 */
1893struct page *read_cache_page(struct address_space *mapping,
1894				pgoff_t index,
1895				int (*filler)(void *, struct page *),
1896				void *data)
1897{
1898	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
1899}
1900EXPORT_SYMBOL(read_cache_page);
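
/*
 * Editor's sketch (not part of the original file): the common pattern for
 * reading one page of a file from inside a filesystem, using the
 * read_mapping_page() wrapper from <linux/pagemap.h>, which passes the
 * mapping's ->readpage as the filler.  example_read_one_page() is a
 * hypothetical name.
 */
static int example_read_one_page(struct address_space *mapping, pgoff_t n)
{
	struct page *page = read_mapping_page(mapping, n, NULL);
	char *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);	/* -EIO if it never became uptodate */

	kaddr = kmap(page);
	/* ... use the PAGE_CACHE_SIZE bytes at kaddr ... */
	kunmap(page);
	page_cache_release(page);
	return 0;
}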
1901
1902static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1903			const struct iovec *iov, size_t base, size_t bytes)
1904{
1905	size_t copied = 0, left = 0;
1906
1907	while (bytes) {
1908		char __user *buf = iov->iov_base + base;
1909		int copy = min(bytes, iov->iov_len - base);
1910
1911		base = 0;
1912		left = __copy_from_user_inatomic(vaddr, buf, copy);
1913		copied += copy;
1914		bytes -= copy;
1915		vaddr += copy;
1916		iov++;
1917
1918		if (unlikely(left))
1919			break;
1920	}
1921	return copied - left;
1922}
1923
1924/*
1925 * Copy as much as we can into the page and return the number of bytes which
1926 * were successfully copied.  If a fault is encountered, return only the number
1927 * of bytes copied before the fault.
1928 */
1929size_t iov_iter_copy_from_user_atomic(struct page *page,
1930		struct iov_iter *i, unsigned long offset, size_t bytes)
1931{
1932	char *kaddr;
1933	size_t copied;
1934
1935	BUG_ON(!in_atomic());
1936	kaddr = kmap_atomic(page);
1937	if (likely(i->nr_segs == 1)) {
1938		int left;
1939		char __user *buf = i->iov->iov_base + i->iov_offset;
1940		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
1941		copied = bytes - left;
1942	} else {
1943		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1944						i->iov, i->iov_offset, bytes);
1945	}
1946	kunmap_atomic(kaddr);
1947
1948	return copied;
1949}
1950EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1951
1952/*
1953 * This has the same sideeffects and return value as
1954 * iov_iter_copy_from_user_atomic().
1955 * The difference is that it attempts to resolve faults.
1956 * Page must not be locked.
1957 */
1958size_t iov_iter_copy_from_user(struct page *page,
1959		struct iov_iter *i, unsigned long offset, size_t bytes)
1960{
1961	char *kaddr;
1962	size_t copied;
1963
1964	kaddr = kmap(page);
1965	if (likely(i->nr_segs == 1)) {
1966		int left;
1967		char __user *buf = i->iov->iov_base + i->iov_offset;
1968		left = __copy_from_user(kaddr + offset, buf, bytes);
1969		copied = bytes - left;
1970	} else {
1971		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1972						i->iov, i->iov_offset, bytes);
1973	}
1974	kunmap(page);
1975	return copied;
1976}
1977EXPORT_SYMBOL(iov_iter_copy_from_user);
1978
1979void iov_iter_advance(struct iov_iter *i, size_t bytes)
1980{
1981	BUG_ON(i->count < bytes);
1982
1983	if (likely(i->nr_segs == 1)) {
1984		i->iov_offset += bytes;
1985		i->count -= bytes;
1986	} else {
1987		const struct iovec *iov = i->iov;
1988		size_t base = i->iov_offset;
1989		unsigned long nr_segs = i->nr_segs;
1990
1991		/*
1992		 * The !iov->iov_len check ensures we skip over unlikely
1993		 * zero-length segments (without overrunning the iovec).
1994		 */
1995		while (bytes || unlikely(i->count && !iov->iov_len)) {
1996			int copy;
1997
1998			copy = min(bytes, iov->iov_len - base);
1999			BUG_ON(!i->count || i->count < copy);
2000			i->count -= copy;
2001			bytes -= copy;
2002			base += copy;
2003			if (iov->iov_len == base) {
2004				iov++;
2005				nr_segs--;
2006				base = 0;
2007			}
2008		}
2009		i->iov = iov;
2010		i->iov_offset = base;
2011		i->nr_segs = nr_segs;
2012	}
2013}
2014EXPORT_SYMBOL(iov_iter_advance);
2015
2016/*
2017 * Fault in the first iovec of the given iov_iter, to a maximum length
2018 * of bytes. Returns 0 on success, or non-zero if the memory could not be
2019 * accessed (ie. because it is an invalid address).
2020 *
2021 * writev-intensive code may want this to prefault several iovecs -- that
2022 * would be possible (callers must not rely on the fact that _only_ the
2023 * first iovec will be faulted with the current implementation).
2024 */
2025int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2026{
2027	char __user *buf = i->iov->iov_base + i->iov_offset;
2028	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
2029	return fault_in_pages_readable(buf, bytes);
2030}
2031EXPORT_SYMBOL(iov_iter_fault_in_readable);
2032
2033/*
2034 * Return the count of just the current iov_iter segment.
2035 */
2036size_t iov_iter_single_seg_count(struct iov_iter *i)
2037{
2038	const struct iovec *iov = i->iov;
2039	if (i->nr_segs == 1)
2040		return i->count;
2041	else
2042		return min(i->count, iov->iov_len - i->iov_offset);
2043}
2044EXPORT_SYMBOL(iov_iter_single_seg_count);
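
/*
 * Editor's sketch (not part of the original file): how the iov_iter
 * helpers above cooperate.  The iterator is initialised over an iovec
 * array and then consumed in chunks with iov_iter_advance(), which steps
 * across segment boundaries transparently.  For brevity this assumes no
 * zero-length segments; the chunk size is illustrative.
 */
static void example_iov_iter_walk(const struct iovec *iov,
				  unsigned long nr_segs, size_t count)
{
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, 0);
	while (iov_iter_count(&i)) {
		size_t chunk = min(iov_iter_single_seg_count(&i),
				   (size_t)PAGE_CACHE_SIZE);

		/* ... copy 'chunk' bytes at the current position here ... */
		iov_iter_advance(&i, chunk);
	}
}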
2045
2046/*
2047 * Performs necessary checks before doing a write
2048 *
2049 * May adjust the write position or the number of bytes to write.
2050 * Returns an appropriate error code that the caller should return,
2051 * or zero if the write should be allowed.
2052 */
2053inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
2054{
2055	struct inode *inode = file->f_mapping->host;
2056	unsigned long limit = rlimit(RLIMIT_FSIZE);
2057
2058	if (unlikely(*pos < 0))
2059		return -EINVAL;
2060
2061	if (!isblk) {
2062		/* FIXME: this is for backwards compatibility with 2.4 */
2063		if (file->f_flags & O_APPEND)
2064			*pos = i_size_read(inode);
2065
2066		if (limit != RLIM_INFINITY) {
2067			if (*pos >= limit) {
2068				send_sig(SIGXFSZ, current, 0);
2069				return -EFBIG;
2070			}
2071			if (*count > limit - (typeof(limit))*pos) {
2072				*count = limit - (typeof(limit))*pos;
2073			}
2074		}
2075	}
2076
2077	/*
2078	 * LFS rule
2079	 */
2080	if (unlikely(*pos + *count > MAX_NON_LFS &&
2081				!(file->f_flags & O_LARGEFILE))) {
2082		if (*pos >= MAX_NON_LFS) {
2083			return -EFBIG;
2084		}
2085		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
2086			*count = MAX_NON_LFS - (unsigned long)*pos;
2087		}
2088	}
2089
2090	/*
2091	 * Are we about to exceed the fs block limit ?
2092	 *
2093	 * If we have already written data, it becomes a short write.  If we
2094	 * have exceeded the limit without writing data, we send a signal and
2095	 * return EFBIG.  Linus' frestrict idea will clean these up nicely..
2096	 */
2097	if (likely(!isblk)) {
2098		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
2099			if (*count || *pos > inode->i_sb->s_maxbytes) {
2100				return -EFBIG;
2101			}
2102			/* zero-length writes at ->s_maxbytes are OK */
2103		}
2104
2105		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
2106			*count = inode->i_sb->s_maxbytes - *pos;
2107	} else {
2108#ifdef CONFIG_BLOCK
2109		loff_t isize;
2110		if (bdev_read_only(I_BDEV(inode)))
2111			return -EPERM;
2112		isize = i_size_read(inode);
2113		if (*pos >= isize) {
2114			if (*count || *pos > isize)
2115				return -ENOSPC;
2116		}
2117
2118		if (*pos + *count > isize)
2119			*count = isize - *pos;
2120#else
2121		return -EPERM;
2122#endif
2123	}
2124	return 0;
2125}
2126EXPORT_SYMBOL(generic_write_checks);
2127
2128int pagecache_write_begin(struct file *file, struct address_space *mapping,
2129				loff_t pos, unsigned len, unsigned flags,
2130				struct page **pagep, void **fsdata)
2131{
2132	const struct address_space_operations *aops = mapping->a_ops;
2133
2134	return aops->write_begin(file, mapping, pos, len, flags,
2135							pagep, fsdata);
2136}
2137EXPORT_SYMBOL(pagecache_write_begin);
2138
2139int pagecache_write_end(struct file *file, struct address_space *mapping,
2140				loff_t pos, unsigned len, unsigned copied,
2141				struct page *page, void *fsdata)
2142{
2143	const struct address_space_operations *aops = mapping->a_ops;
2144
2145	mark_page_accessed(page);
2146	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
2147}
2148EXPORT_SYMBOL(pagecache_write_end);
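
/*
 * Editor's sketch (not part of the original file): copying a short kernel
 * buffer into the page cache through the begin/end pair above, the same
 * way other in-kernel writers (the splice path, for example) drive
 * ->write_begin and ->write_end.  Assumes @len does not cross a page
 * boundary; the helper name is hypothetical.
 */
static int example_write_into_pagecache(struct file *file, loff_t pos,
					const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata;
	char *kaddr;
	int status;

	status = pagecache_write_begin(file, mapping, pos, len,
				       AOP_FLAG_UNINTERRUPTIBLE,
				       &page, &fsdata);
	if (status)
		return status;

	kaddr = kmap_atomic(page);
	memcpy(kaddr + (pos & (PAGE_CACHE_SIZE - 1)), buf, len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);

	status = pagecache_write_end(file, mapping, pos, len, len,
				     page, fsdata);
	return status < 0 ? status : 0;
}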
2149
2150ssize_t
2151generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2152		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
2153		size_t count, size_t ocount)
2154{
2155	struct file	*file = iocb->ki_filp;
2156	struct address_space *mapping = file->f_mapping;
2157	struct inode	*inode = mapping->host;
2158	ssize_t		written;
2159	size_t		write_len;
2160	pgoff_t		end;
2161
2162	if (count != ocount)
2163		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2164
2165	write_len = iov_length(iov, *nr_segs);
2166	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2167
2168	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
2169	if (written)
2170		goto out;
2171
2172	/*
2173	 * After a write we want buffered reads to be sure to go to disk to get
2174	 * the new data.  We invalidate clean cached pages from the region we're
2175	 * about to write.  We do this *before* the write so that we can return
2176	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2177	 */
2178	if (mapping->nrpages) {
2179		written = invalidate_inode_pages2_range(mapping,
2180					pos >> PAGE_CACHE_SHIFT, end);
2181		/*
2182		 * If a page cannot be invalidated, return 0 to fall back
2183		 * to a buffered write.
2184		 */
2185		if (written) {
2186			if (written == -EBUSY)
2187				return 0;
2188			goto out;
2189		}
2190	}
2191
2192	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2193
2194	/*
2195	 * Finally, try again to invalidate clean pages which might have been
2196	 * cached by non-direct readahead, or faulted in by get_user_pages()
2197	 * if the source of the write was an mmap'ed region of the file
2198	 * we're writing.  Either one is a pretty crazy thing to do,
2199	 * so we don't support it 100%.  If this invalidation
2200	 * fails, tough, the write still worked...
2201	 */
2202	if (mapping->nrpages) {
2203		invalidate_inode_pages2_range(mapping,
2204					      pos >> PAGE_CACHE_SHIFT, end);
2205	}
2206
2207	if (written > 0) {
2208		pos += written;
2209		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2210			i_size_write(inode, pos);
2211			mark_inode_dirty(inode);
2212		}
2213		*ppos = pos;
2214	}
2215out:
2216	return written;
2217}
2218EXPORT_SYMBOL(generic_file_direct_write);
2219
2220/*
2221 * Find or create a page at the given pagecache position. Return the locked
2222 * page. This function is specifically for buffered writes.
2223 */
2224struct page *grab_cache_page_write_begin(struct address_space *mapping,
2225					pgoff_t index, unsigned flags)
2226{
2227	int status;
2228	gfp_t gfp_mask;
2229	struct page *page;
2230	gfp_t gfp_notmask = 0;
2231
2232	gfp_mask = mapping_gfp_mask(mapping);
2233	if (mapping_cap_account_dirty(mapping))
2234		gfp_mask |= __GFP_WRITE;
2235	if (flags & AOP_FLAG_NOFS)
2236		gfp_notmask = __GFP_FS;
2237repeat:
2238	page = find_lock_page(mapping, index);
2239	if (page)
2240		goto found;
2241
2242	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
2243	if (!page)
2244		return NULL;
2245	status = add_to_page_cache_lru(page, mapping, index,
2246						GFP_KERNEL & ~gfp_notmask);
2247	if (unlikely(status)) {
2248		page_cache_release(page);
2249		if (status == -EEXIST)
2250			goto repeat;
2251		return NULL;
2252	}
2253found:
2254	wait_on_page_writeback(page);
2255	return page;
2256}
2257EXPORT_SYMBOL(grab_cache_page_write_begin);
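
/*
 * Editor's sketch (not part of the original file): the usual shape of a
 * filesystem ->write_begin implementation built on
 * grab_cache_page_write_begin(); compare block_write_begin() in
 * fs/buffer.c.  examplefs_write_begin() is hypothetical and does no real
 * block allocation.
 */
static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* a real implementation would map or allocate blocks for the range here */

	*pagep = page;
	return 0;
}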
2258
2259static ssize_t generic_perform_write(struct file *file,
2260				struct iov_iter *i, loff_t pos)
2261{
2262	struct address_space *mapping = file->f_mapping;
2263	const struct address_space_operations *a_ops = mapping->a_ops;
2264	long status = 0;
2265	ssize_t written = 0;
2266	unsigned int flags = 0;
2267
2268	/*
2269	 * Copies from kernel address space cannot fail (NFSD is a big user).
2270	 */
2271	if (segment_eq(get_fs(), KERNEL_DS))
2272		flags |= AOP_FLAG_UNINTERRUPTIBLE;
2273
2274	do {
2275		struct page *page;
2276		unsigned long offset;	/* Offset into pagecache page */
2277		unsigned long bytes;	/* Bytes to write to page */
2278		size_t copied;		/* Bytes copied from user */
2279		void *fsdata;
2280
2281		offset = (pos & (PAGE_CACHE_SIZE - 1));
2282		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2283						iov_iter_count(i));
2284
2285again:
2286		/*
2287		 * Bring in the user page that we will copy from _first_.
2288		 * Otherwise there's a nasty deadlock on copying from the
2289		 * same page as we're writing to, without it being marked
2290		 * up-to-date.
2291		 *
2292		 * Not only is this an optimisation, but it is also required
2293		 * to check that the address is actually valid, when atomic
2294		 * usercopies are used, below.
2295		 */
2296		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2297			status = -EFAULT;
2298			break;
2299		}
2300
2301		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
2302						&page, &fsdata);
2303		if (unlikely(status))
2304			break;
2305
2306		if (mapping_writably_mapped(mapping))
2307			flush_dcache_page(page);
2308
2309		pagefault_disable();
2310		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2311		pagefault_enable();
2312		flush_dcache_page(page);
2313
2314		mark_page_accessed(page);
2315		status = a_ops->write_end(file, mapping, pos, bytes, copied,
2316						page, fsdata);
2317		if (unlikely(status < 0))
2318			break;
2319		copied = status;
2320
2321		cond_resched();
2322
2323		iov_iter_advance(i, copied);
2324		if (unlikely(copied == 0)) {
2325			/*
2326			 * If we were unable to copy any data at all, we must
2327			 * fall back to a single segment length write.
2328			 *
2329			 * If we didn't fallback here, we could livelock
2330			 * because not all segments in the iov can be copied at
2331			 * once without a pagefault.
2332			 */
2333			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2334						iov_iter_single_seg_count(i));
2335			goto again;
2336		}
2337		pos += copied;
2338		written += copied;
2339
2340		balance_dirty_pages_ratelimited(mapping);
2341		if (fatal_signal_pending(current)) {
2342			status = -EINTR;
2343			break;
2344		}
2345	} while (iov_iter_count(i));
2346
2347	return written ? written : status;
2348}
2349
2350ssize_t
2351generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2352		unsigned long nr_segs, loff_t pos, loff_t *ppos,
2353		size_t count, ssize_t written)
2354{
2355	struct file *file = iocb->ki_filp;
2356	ssize_t status;
2357	struct iov_iter i;
2358
2359	iov_iter_init(&i, iov, nr_segs, count, written);
2360	status = generic_perform_write(file, &i, pos);
2361
2362	if (likely(status >= 0)) {
2363		written += status;
2364		*ppos = pos + status;
2365	}
2366
2367	return written ? written : status;
2368}
2369EXPORT_SYMBOL(generic_file_buffered_write);
2370
2371/**
2372 * __generic_file_aio_write - write data to a file
2373 * @iocb:	IO state structure (file, offset, etc.)
2374 * @iov:	vector with data to write
2375 * @nr_segs:	number of segments in the vector
2376 * @ppos:	position where to write
2377 *
2378 * This function does all the work needed for actually writing data to a
2379 * file. It does all basic checks, removes SUID from the file, updates
2380 * modification times and calls proper subroutines depending on whether we
2381 * do direct IO or a standard buffered write.
2382 *
2383 * It expects i_mutex to be grabbed unless we work on a block device or similar
2384 * object which does not need locking at all.
2385 *
2386 * This function does *not* take care of syncing data in case of O_SYNC write.
2387 * A caller has to handle it. This is mainly due to the fact that we want to
2388 * avoid syncing under i_mutex.
2389 */
2390ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2391				 unsigned long nr_segs, loff_t *ppos)
2392{
2393	struct file *file = iocb->ki_filp;
2394	struct address_space * mapping = file->f_mapping;
2395	size_t ocount;		/* original count */
2396	size_t count;		/* after file limit checks */
2397	struct inode 	*inode = mapping->host;
2398	loff_t		pos;
2399	ssize_t		written;
2400	ssize_t		err;
2401
2402	ocount = 0;
2403	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2404	if (err)
2405		return err;
2406
2407	count = ocount;
2408	pos = *ppos;
2409
2410	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2411
2412	/* We can write back this queue in page reclaim */
2413	current->backing_dev_info = mapping->backing_dev_info;
2414	written = 0;
2415
2416	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2417	if (err)
2418		goto out;
2419
2420	if (count == 0)
2421		goto out;
2422
2423	err = file_remove_suid(file);
2424	if (err)
2425		goto out;
2426
2427	err = file_update_time(file);
2428	if (err)
2429		goto out;
2430
2431	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2432	if (unlikely(file->f_flags & O_DIRECT)) {
2433		loff_t endbyte;
2434		ssize_t written_buffered;
2435
2436		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2437							ppos, count, ocount);
2438		if (written < 0 || written == count)
2439			goto out;
2440		/*
2441		 * direct-io write to a hole: fall through to buffered I/O
2442		 * for completing the rest of the request.
2443		 */
2444		pos += written;
2445		count -= written;
2446		written_buffered = generic_file_buffered_write(iocb, iov,
2447						nr_segs, pos, ppos, count,
2448						written);
2449		/*
2450		 * If generic_file_buffered_write() returned a synchronous error
2451		 * then we want to return the number of bytes which were
2452		 * direct-written, or the error code if that was zero.  Note
2453		 * that this differs from normal direct-io semantics, which
2454		 * will return -EFOO even if some bytes were written.
2455		 */
2456		if (written_buffered < 0) {
2457			err = written_buffered;
2458			goto out;
2459		}
2460
2461		/*
2462		 * We need to ensure that the page cache pages are written to
2463		 * disk and invalidated to preserve the expected O_DIRECT
2464		 * semantics.
2465		 */
2466		endbyte = pos + written_buffered - written - 1;
2467		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
2468		if (err == 0) {
2469			written = written_buffered;
2470			invalidate_mapping_pages(mapping,
2471						 pos >> PAGE_CACHE_SHIFT,
2472						 endbyte >> PAGE_CACHE_SHIFT);
2473		} else {
2474			/*
2475			 * We don't know how much we wrote, so just return
2476			 * the number of bytes which were direct-written
2477			 */
2478		}
2479	} else {
2480		written = generic_file_buffered_write(iocb, iov, nr_segs,
2481				pos, ppos, count, written);
2482	}
2483out:
2484	current->backing_dev_info = NULL;
2485	return written ? written : err;
2486}
2487EXPORT_SYMBOL(__generic_file_aio_write);
2488
2489/**
2490 * generic_file_aio_write - write data to a file
2491 * @iocb:	IO state structure
2492 * @iov:	vector with data to write
2493 * @nr_segs:	number of segments in the vector
2494 * @pos:	position in file where to write
2495 *
2496 * This is a wrapper around __generic_file_aio_write() to be used by most
2497 * filesystems. It takes care of syncing the file in case of O_SYNC file
2498 * and acquires i_mutex as needed.
2499 */
2500ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2501		unsigned long nr_segs, loff_t pos)
2502{
2503	struct file *file = iocb->ki_filp;
2504	struct inode *inode = file->f_mapping->host;
2505	struct blk_plug plug;
2506	ssize_t ret;
2507
2508	BUG_ON(iocb->ki_pos != pos);
2509
2510	mutex_lock(&inode->i_mutex);
2511	blk_start_plug(&plug);
2512	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2513	mutex_unlock(&inode->i_mutex);
2514
2515	if (ret > 0 || ret == -EIOCBQUEUED) {
2516		ssize_t err;
2517
2518		err = generic_write_sync(file, pos, ret);
2519		if (err < 0 && ret > 0)
2520			ret = err;
2521	}
2522	blk_finish_plug(&plug);
2523	return ret;
2524}
2525EXPORT_SYMBOL(generic_file_aio_write);
2526
2527/**
2528 * try_to_release_page() - release old fs-specific metadata on a page
2529 *
2530 * @page: the page which the kernel is trying to free
2531 * @gfp_mask: memory allocation flags (and I/O mode)
2532 *
2533 * The address_space is asked to release any data it holds against the page
2534 * (presumably at page->private).  If the release was successful, return `1'.
2535 * Otherwise return zero.
2536 *
2537 * This may also be called if PG_fscache is set on a page, indicating that the
2538 * page is known to the local caching routines.
2539 *
2540 * The @gfp_mask argument specifies whether I/O may be performed to release
2541 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2542 *
2543 */
2544int try_to_release_page(struct page *page, gfp_t gfp_mask)
2545{
2546	struct address_space * const mapping = page->mapping;
2547
2548	BUG_ON(!PageLocked(page));
2549	if (PageWriteback(page))
2550		return 0;
2551
2552	if (mapping && mapping->a_ops->releasepage)
2553		return mapping->a_ops->releasepage(page, gfp_mask);
2554	return try_to_free_buffers(page);
2555}
2556
2557EXPORT_SYMBOL(try_to_release_page);
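
/*
 * Editor's sketch (not part of the original file): how reclaim-style code
 * asks a locked page to drop its private filesystem data before freeing
 * it.  The helper name is hypothetical; the caller must hold the page
 * lock, exactly as try_to_release_page() expects.
 */
static int example_strip_private(struct page *page)
{
	if (!page_has_private(page))
		return 1;	/* nothing attached, nothing to release */

	/* GFP_KERNEL: I/O and blocking are allowed while dropping metadata */
	return try_to_release_page(page, GFP_KERNEL);
}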