Linux v4.6
  1/*
  2 * mm/truncate.c - code for taking down pages from address_spaces
  3 *
  4 * Copyright (C) 2002, Linus Torvalds
  5 *
  6 * 10Sep2002	Andrew Morton
  7 *		Initial version.
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/backing-dev.h>
 12#include <linux/dax.h>
 13#include <linux/gfp.h>
 14#include <linux/mm.h>
 15#include <linux/swap.h>
 16#include <linux/export.h>
 17#include <linux/pagemap.h>
 18#include <linux/highmem.h>
 19#include <linux/pagevec.h>
 20#include <linux/task_io_accounting_ops.h>
 21#include <linux/buffer_head.h>	/* grr. try_to_release_page,
 22				   do_invalidatepage */
 23#include <linux/cleancache.h>
 24#include <linux/rmap.h>
 25#include "internal.h"
 26
 27static void clear_exceptional_entry(struct address_space *mapping,
 28				    pgoff_t index, void *entry)
 29{
 30	struct radix_tree_node *node;
 31	void **slot;
 32
 33	/* Handled by shmem itself */
 34	if (shmem_mapping(mapping))
 35		return;
 36
 37	spin_lock_irq(&mapping->tree_lock);
 38
 39	if (dax_mapping(mapping)) {
 40		if (radix_tree_delete_item(&mapping->page_tree, index, entry))
 41			mapping->nrexceptional--;
 42	} else {
 43		/*
 44		 * Regular page slots are stabilized by the page lock even
 45		 * without the tree itself locked.  These unlocked entries
 46		 * need verification under the tree lock.
 47		 */
 48		if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
 49					&slot))
 50			goto unlock;
 51		if (*slot != entry)
 52			goto unlock;
 53		radix_tree_replace_slot(slot, NULL);
 54		mapping->nrexceptional--;
 55		if (!node)
 56			goto unlock;
 57		workingset_node_shadows_dec(node);
 58		/*
 59		 * Don't track node without shadow entries.
 60		 *
 61		 * Avoid acquiring the list_lru lock if already untracked.
 62		 * The list_empty() test is safe as node->private_list is
 63		 * protected by mapping->tree_lock.
 64		 */
 65		if (!workingset_node_shadows(node) &&
 66		    !list_empty(&node->private_list))
 67			list_lru_del(&workingset_shadow_nodes,
 68					&node->private_list);
 69		__radix_tree_delete_node(&mapping->page_tree, node);
 70	}
 71unlock:
 72	spin_unlock_irq(&mapping->tree_lock);
 73}
 74
 75/**
 76 * do_invalidatepage - invalidate part or all of a page
 77 * @page: the page which is affected
 78 * @offset: start of the range to invalidate
 79 * @length: length of the range to invalidate
 80 *
 81 * do_invalidatepage() is called when all or part of the page has become
 82 * invalidated by a truncate operation.
 83 *
 84 * do_invalidatepage() does not have to release all buffers, but it must
 85 * ensure that no dirty buffer is left outside @offset and that no I/O
 86 * is underway against any of the blocks which are outside the truncation
 87 * point, because the caller is about to free (and possibly reuse) those
 88 * blocks on-disk.
 89 */
 90void do_invalidatepage(struct page *page, unsigned int offset,
 91		       unsigned int length)
 92{
 93	void (*invalidatepage)(struct page *, unsigned int, unsigned int);
 94
 95	invalidatepage = page->mapping->a_ops->invalidatepage;
 96#ifdef CONFIG_BLOCK
 97	if (!invalidatepage)
 98		invalidatepage = block_invalidatepage;
 99#endif
100	if (invalidatepage)
101		(*invalidatepage)(page, offset, length);
102}
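
To make the hook concrete, here is a minimal sketch of a filesystem-supplied ->invalidatepage implementation; the examplefs_ name and the PageChecked flag usage are illustrative assumptions, not taken from this file. It drops a hypothetical fs-private per-page flag when the whole page is invalidated and defers buffer teardown to block_invalidatepage():

static void examplefs_invalidatepage(struct page *page, unsigned int offset,
				     unsigned int length)
{
	/* Whole page going away: clear the (hypothetical) fs-private flag */
	if (offset == 0 && length == PAGE_SIZE)
		ClearPageChecked(page);
	/* Let the generic block helper invalidate buffers in the range */
	block_invalidatepage(page, offset, length);
}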
103
104/*
105 * If truncate cannot remove the fs-private metadata from the page, the page
106 * becomes orphaned.  It will be left on the LRU and may even be mapped into
107 * user pagetables if we're racing with filemap_fault().
108 *
109 * We need to bail out if page->mapping is no longer equal to the original
110 * mapping.  This happens a) when the VM reclaimed the page while we waited on
111 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
112 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
113 */
114static int
115truncate_complete_page(struct address_space *mapping, struct page *page)
116{
117	if (page->mapping != mapping)
118		return -EIO;
119
120	if (page_has_private(page))
121		do_invalidatepage(page, 0, PAGE_SIZE);
122
123	/*
124	 * Some filesystems seem to re-dirty the page even after
125	 * the VM has canceled the dirty bit (eg ext3 journaling).
126	 * Hence dirty accounting check is placed after invalidation.
127	 */
128	cancel_dirty_page(page);
129	ClearPageMappedToDisk(page);
130	delete_from_page_cache(page);
131	return 0;
132}
133
134/*
135 * This is for invalidate_mapping_pages().  That function can be called at
136 * any time, and is not supposed to throw away dirty pages.  But pages can
137 * be marked dirty at any time too, so use remove_mapping which safely
138 * discards clean, unused pages.
139 *
140 * Returns non-zero if the page was successfully invalidated.
141 */
142static int
143invalidate_complete_page(struct address_space *mapping, struct page *page)
144{
145	int ret;
146
147	if (page->mapping != mapping)
148		return 0;
149
150	if (page_has_private(page) && !try_to_release_page(page, 0))
151		return 0;
152
153	ret = remove_mapping(mapping, page);
154
155	return ret;
156}
157
158int truncate_inode_page(struct address_space *mapping, struct page *page)
159{
160	if (page_mapped(page)) {
161		unmap_mapping_range(mapping,
162				   (loff_t)page->index << PAGE_SHIFT,
163				   PAGE_SIZE, 0);
164	}
165	return truncate_complete_page(mapping, page);
166}
167
168/*
169 * Used to get rid of pages on hardware memory corruption.
170 */
171int generic_error_remove_page(struct address_space *mapping, struct page *page)
172{
173	if (!mapping)
174		return -EINVAL;
175	/*
176	 * Only punch for normal data pages for now.
177	 * Handling other types like directories would need more auditing.
178	 */
179	if (!S_ISREG(mapping->host->i_mode))
180		return -EIO;
181	return truncate_inode_page(mapping, page);
182}
183EXPORT_SYMBOL(generic_error_remove_page);
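
Filesystems opt in to this memory-failure handling through their address_space_operations. A hedged sketch (the examplefs_ names are assumptions), reusing the ->invalidatepage example from above:

static const struct address_space_operations examplefs_aops = {
	.invalidatepage		= examplefs_invalidatepage,
	.error_remove_page	= generic_error_remove_page,
	/* ->readpage, ->writepage and friends omitted from this sketch */
};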
184
185/*
186 * Safely invalidate one page from its pagecache mapping.
187 * It only drops clean, unused pages. The page must be locked.
188 *
189 * Returns 1 if the page is successfully invalidated, otherwise 0.
190 */
191int invalidate_inode_page(struct page *page)
192{
193	struct address_space *mapping = page_mapping(page);
194	if (!mapping)
195		return 0;
196	if (PageDirty(page) || PageWriteback(page))
197		return 0;
198	if (page_mapped(page))
199		return 0;
200	return invalidate_complete_page(mapping, page);
201}
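
A small, assumed sketch of the locking contract stated above: the caller holds a reference on the page and must lock it around the call.

static int example_try_drop_page(struct page *page)
{
	int ret;

	lock_page(page);
	ret = invalidate_inode_page(page);	/* 1 if dropped, else 0 */
	unlock_page(page);
	return ret;
}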
202
203/**
204 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
205 * @mapping: mapping to truncate
206 * @lstart: offset from which to truncate
207 * @lend: offset to which to truncate (inclusive)
208 *
209 * Truncate the page cache, removing the pages that are between
210 * specified offsets (and zeroing out partial pages
211 * if lstart or lend + 1 is not page aligned).
212 *
213 * Truncate takes two passes - the first pass is nonblocking.  It will not
214 * block on page locks and it will not block on writeback.  The second pass
215 * will wait.  This is to prevent as much IO as possible in the affected region.
216 * The first pass will remove most pages, so the search cost of the second pass
217 * is low.
218 *
219 * We pass down the cache-hot hint to the page freeing code.  Even if the
220 * mapping is large, it is probably the case that the final pages are the most
221 * recently touched, and freeing happens in ascending file offset order.
222 *
223 * Note that since ->invalidatepage() accepts a range to invalidate,
224 * truncate_inode_pages_range is able to handle cases where lend + 1 is
225 * not page aligned.
226 */
227void truncate_inode_pages_range(struct address_space *mapping,
228				loff_t lstart, loff_t lend)
229{
230	pgoff_t		start;		/* inclusive */
231	pgoff_t		end;		/* exclusive */
232	unsigned int	partial_start;	/* inclusive */
233	unsigned int	partial_end;	/* exclusive */
234	struct pagevec	pvec;
235	pgoff_t		indices[PAGEVEC_SIZE];
236	pgoff_t		index;
237	int		i;
238
239	cleancache_invalidate_inode(mapping);
240	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
241		return;
242
243	/* Offsets within partial pages */
244	partial_start = lstart & (PAGE_SIZE - 1);
245	partial_end = (lend + 1) & (PAGE_SIZE - 1);
246
247	/*
248	 * 'start' and 'end' always covers the range of pages to be fully
249	 * truncated. Partial pages are covered with 'partial_start' at the
250	 * start of the range and 'partial_end' at the end of the range.
251	 * Note that 'end' is exclusive while 'lend' is inclusive.
252	 */
253	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
254	if (lend == -1)
255		/*
256		 * lend == -1 indicates end-of-file so we have to set 'end'
257		 * to the highest possible pgoff_t and since the type is
258		 * unsigned we're using -1.
259		 */
260		end = -1;
261	else
262		end = (lend + 1) >> PAGE_SHIFT;
263
264	pagevec_init(&pvec, 0);
265	index = start;
266	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
267			min(end - index, (pgoff_t)PAGEVEC_SIZE),
268			indices)) {
269		for (i = 0; i < pagevec_count(&pvec); i++) {
270			struct page *page = pvec.pages[i];
271
272			/* We rely upon deletion not changing page->index */
273			index = indices[i];
274			if (index >= end)
275				break;
276
277			if (radix_tree_exceptional_entry(page)) {
278				clear_exceptional_entry(mapping, index, page);
279				continue;
280			}
281
282			if (!trylock_page(page))
283				continue;
284			WARN_ON(page->index != index);
285			if (PageWriteback(page)) {
286				unlock_page(page);
287				continue;
288			}
289			truncate_inode_page(mapping, page);
290			unlock_page(page);
291		}
292		pagevec_remove_exceptionals(&pvec);
293		pagevec_release(&pvec);
294		cond_resched();
295		index++;
296	}
297
298	if (partial_start) {
299		struct page *page = find_lock_page(mapping, start - 1);
300		if (page) {
301			unsigned int top = PAGE_SIZE;
302			if (start > end) {
303				/* Truncation within a single page */
304				top = partial_end;
305				partial_end = 0;
306			}
307			wait_on_page_writeback(page);
308			zero_user_segment(page, partial_start, top);
309			cleancache_invalidate_page(mapping, page);
310			if (page_has_private(page))
311				do_invalidatepage(page, partial_start,
312						  top - partial_start);
313			unlock_page(page);
314			put_page(page);
315		}
316	}
317	if (partial_end) {
318		struct page *page = find_lock_page(mapping, end);
319		if (page) {
320			wait_on_page_writeback(page);
321			zero_user_segment(page, 0, partial_end);
322			cleancache_invalidate_page(mapping, page);
323			if (page_has_private(page))
324				do_invalidatepage(page, 0,
325						  partial_end);
326			unlock_page(page);
327			put_page(page);
328		}
329	}
330	/*
331	 * If the truncation happened within a single page no pages
332	 * will be released, just zeroed, so we can bail out now.
333	 */
334	if (start >= end)
335		return;
336
337	index = start;
338	for ( ; ; ) {
339		cond_resched();
340		if (!pagevec_lookup_entries(&pvec, mapping, index,
341			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
342			/* If all gone from start onwards, we're done */
343			if (index == start)
344				break;
345			/* Otherwise restart to make sure all gone */
346			index = start;
347			continue;
348		}
349		if (index == start && indices[0] >= end) {
350			/* All gone out of hole to be punched, we're done */
351			pagevec_remove_exceptionals(&pvec);
352			pagevec_release(&pvec);
353			break;
354		}
355		for (i = 0; i < pagevec_count(&pvec); i++) {
356			struct page *page = pvec.pages[i];
357
358			/* We rely upon deletion not changing page->index */
359			index = indices[i];
360			if (index >= end) {
361				/* Restart punch to make sure all gone */
362				index = start - 1;
363				break;
364			}
365
366			if (radix_tree_exceptional_entry(page)) {
367				clear_exceptional_entry(mapping, index, page);
368				continue;
369			}
370
371			lock_page(page);
372			WARN_ON(page->index != index);
373			wait_on_page_writeback(page);
374			truncate_inode_page(mapping, page);
375			unlock_page(page);
376		}
377		pagevec_remove_exceptionals(&pvec);
378		pagevec_release(&pvec);
379		index++;
380	}
381	cleancache_invalidate_inode(mapping);
382}
383EXPORT_SYMBOL(truncate_inode_pages_range);
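
A usage sketch of the inclusive 'lend' convention, assuming 4KB pages and a hypothetical caller: to drop the cache for bytes 4096..12287, the last byte of the range is passed, not one past it.

static void example_drop_cached_range(struct inode *inode)
{
	/* lstart = 4096, lend = 12287: pages 1 and 2 are fully truncated */
	truncate_inode_pages_range(inode->i_mapping, 4096, 12287);
}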
384
385/**
386 * truncate_inode_pages - truncate *all* the pages from an offset
387 * @mapping: mapping to truncate
388 * @lstart: offset from which to truncate
389 *
390 * Called under (and serialised by) inode->i_mutex.
391 *
392 * Note: When this function returns, there can be a page in the process of
393 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
394 * mapping->nrpages can be non-zero when this function returns even after
395 * truncation of the whole mapping.
396 */
397void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
398{
399	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
400}
401EXPORT_SYMBOL(truncate_inode_pages);
402
403/**
404 * truncate_inode_pages_final - truncate *all* pages before inode dies
405 * @mapping: mapping to truncate
406 *
407 * Called under (and serialized by) inode->i_mutex.
408 *
409 * Filesystems have to use this in the .evict_inode path to inform the
410 * VM that this is the final truncate and the inode is going away.
411 */
412void truncate_inode_pages_final(struct address_space *mapping)
413{
414	unsigned long nrexceptional;
415	unsigned long nrpages;
416
417	/*
418	 * Page reclaim can not participate in regular inode lifetime
419	 * management (can't call iput()) and thus can race with the
420	 * inode teardown.  Tell it when the address space is exiting,
421	 * so that it does not install eviction information after the
422	 * final truncate has begun.
423	 */
424	mapping_set_exiting(mapping);
425
426	/*
427	 * When reclaim installs eviction entries, it increases
428	 * nrexceptional first, then decreases nrpages.  Make sure we see
429	 * this in the right order or we might miss an entry.
430	 */
431	nrpages = mapping->nrpages;
432	smp_rmb();
433	nrexceptional = mapping->nrexceptional;
434
435	if (nrpages || nrexceptional) {
436		/*
437		 * As truncation uses a lockless tree lookup, cycle
438		 * the tree lock to make sure any ongoing tree
439		 * modification that does not see AS_EXITING is
440		 * completed before starting the final truncate.
441		 */
442		spin_lock_irq(&mapping->tree_lock);
443		spin_unlock_irq(&mapping->tree_lock);
444
445		truncate_inode_pages(mapping, 0);
446	}
447}
448EXPORT_SYMBOL(truncate_inode_pages_final);
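
A minimal sketch of the typical ->evict_inode pairing this describes; the examplefs_ name is an assumption, and a real filesystem releases its private state between the two calls.

static void examplefs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* fs-private teardown (quota, preallocations, ...) would go here */
	clear_inode(inode);
}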
449
450/**
451 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
452 * @mapping: the address_space which holds the pages to invalidate
453 * @start: the offset 'from' which to invalidate
454 * @end: the offset 'to' which to invalidate (inclusive)
455 *
456 * This function only removes the unlocked pages, if you want to
457 * remove all the pages of one inode, you must call truncate_inode_pages.
458 *
459 * invalidate_mapping_pages() will not block on IO activity. It will not
460 * invalidate pages which are dirty, locked, under writeback or mapped into
461 * pagetables.
462 */
463unsigned long invalidate_mapping_pages(struct address_space *mapping,
464		pgoff_t start, pgoff_t end)
465{
466	pgoff_t indices[PAGEVEC_SIZE];
467	struct pagevec pvec;
468	pgoff_t index = start;
469	unsigned long ret;
470	unsigned long count = 0;
471	int i;
472
473	pagevec_init(&pvec, 0);
474	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
475			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
476			indices)) {
477		for (i = 0; i < pagevec_count(&pvec); i++) {
478			struct page *page = pvec.pages[i];
479
480			/* We rely upon deletion not changing page->index */
481			index = indices[i];
482			if (index > end)
483				break;
484
485			if (radix_tree_exceptional_entry(page)) {
486				clear_exceptional_entry(mapping, index, page);
487				continue;
488			}
489
490			if (!trylock_page(page))
491				continue;
492			WARN_ON(page->index != index);
493			ret = invalidate_inode_page(page);
494			unlock_page(page);
495			/*
496			 * Invalidation is a hint that the page is no longer
497			 * of interest, so try to speed up its reclaim.
498			 */
499			if (!ret)
500				deactivate_file_page(page);
501			count += ret;
502		}
503		pagevec_remove_exceptionals(&pvec);
504		pagevec_release(&pvec);
505		cond_resched();
506		index++;
507	}
508	return count;
509}
510EXPORT_SYMBOL(invalidate_mapping_pages);
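
For illustration, an assumed helper that opportunistically drops the clean, unused cache of a whole file, in the spirit of POSIX_FADV_DONTNEED handling; the arguments are page indices, and (pgoff_t)-1 means up to the last possible index.

static unsigned long example_drop_clean_cache(struct address_space *mapping)
{
	/* Returns the number of pages actually invalidated */
	return invalidate_mapping_pages(mapping, 0, (pgoff_t)-1);
}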
511
512/*
513 * This is like invalidate_complete_page(), except it ignores the page's
514 * refcount.  We do this because invalidate_inode_pages2() needs stronger
515 * invalidation guarantees, and cannot afford to leave pages behind because
516 * shrink_page_list() has a temp ref on them, or because they're transiently
517 * sitting in the lru_cache_add() pagevecs.
518 */
519static int
520invalidate_complete_page2(struct address_space *mapping, struct page *page)
521{
522	unsigned long flags;
523
524	if (page->mapping != mapping)
525		return 0;
526
527	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
528		return 0;
529
530	spin_lock_irqsave(&mapping->tree_lock, flags);
531	if (PageDirty(page))
532		goto failed;
533
534	BUG_ON(page_has_private(page));
535	__delete_from_page_cache(page, NULL);
536	spin_unlock_irqrestore(&mapping->tree_lock, flags);
537
538	if (mapping->a_ops->freepage)
539		mapping->a_ops->freepage(page);
540
541	put_page(page);	/* pagecache ref */
542	return 1;
543failed:
544	spin_unlock_irqrestore(&mapping->tree_lock, flags);
545	return 0;
546}
547
548static int do_launder_page(struct address_space *mapping, struct page *page)
549{
550	if (!PageDirty(page))
551		return 0;
552	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
553		return 0;
554	return mapping->a_ops->launder_page(page);
555}
556
557/**
558 * invalidate_inode_pages2_range - remove range of pages from an address_space
559 * @mapping: the address_space
560 * @start: the page offset 'from' which to invalidate
561 * @end: the page offset 'to' which to invalidate (inclusive)
562 *
563 * Any pages which are found to be mapped into pagetables are unmapped prior to
564 * invalidation.
565 *
566 * Returns -EBUSY if any pages could not be invalidated.
567 */
568int invalidate_inode_pages2_range(struct address_space *mapping,
569				  pgoff_t start, pgoff_t end)
570{
571	pgoff_t indices[PAGEVEC_SIZE];
572	struct pagevec pvec;
573	pgoff_t index;
574	int i;
575	int ret = 0;
576	int ret2 = 0;
577	int did_range_unmap = 0;
578
579	cleancache_invalidate_inode(mapping);
580	pagevec_init(&pvec, 0);
581	index = start;
582	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
583			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
584			indices)) {
585		for (i = 0; i < pagevec_count(&pvec); i++) {
586			struct page *page = pvec.pages[i];
587
588			/* We rely upon deletion not changing page->index */
589			index = indices[i];
590			if (index > end)
591				break;
592
593			if (radix_tree_exceptional_entry(page)) {
594				clear_exceptional_entry(mapping, index, page);
595				continue;
596			}
597
598			lock_page(page);
599			WARN_ON(page->index != index);
600			if (page->mapping != mapping) {
601				unlock_page(page);
602				continue;
603			}
604			wait_on_page_writeback(page);
605			if (page_mapped(page)) {
606				if (!did_range_unmap) {
607					/*
608					 * Zap the rest of the file in one hit.
609					 */
610					unmap_mapping_range(mapping,
611					   (loff_t)index << PAGE_SHIFT,
612					   (loff_t)(1 + end - index)
613							 << PAGE_SHIFT,
614							 0);
615					did_range_unmap = 1;
616				} else {
617					/*
618					 * Just zap this page
619					 */
620					unmap_mapping_range(mapping,
621					   (loff_t)index << PAGE_SHIFT,
622					   PAGE_SIZE, 0);
623				}
624			}
625			BUG_ON(page_mapped(page));
626			ret2 = do_launder_page(mapping, page);
627			if (ret2 == 0) {
628				if (!invalidate_complete_page2(mapping, page))
629					ret2 = -EBUSY;
630			}
631			if (ret2 < 0)
632				ret = ret2;
633			unlock_page(page);
634		}
635		pagevec_remove_exceptionals(&pvec);
636		pagevec_release(&pvec);
637		cond_resched();
638		index++;
639	}
640	cleancache_invalidate_inode(mapping);
641	return ret;
642}
643EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
644
645/**
646 * invalidate_inode_pages2 - remove all pages from an address_space
647 * @mapping: the address_space
648 *
649 * Any pages which are found to be mapped into pagetables are unmapped prior to
650 * invalidation.
651 *
652 * Returns -EBUSY if any pages could not be invalidated.
653 */
654int invalidate_inode_pages2(struct address_space *mapping)
655{
656	return invalidate_inode_pages2_range(mapping, 0, -1);
657}
658EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
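
Callers that need this stronger guarantee (direct I/O paths, for instance) have to check for -EBUSY; a minimal, assumed sketch:

static int example_force_drop(struct address_space *mapping)
{
	int err = invalidate_inode_pages2(mapping);

	if (err == -EBUSY)
		pr_warn("examplefs: could not drop stale pagecache\n");
	return err;
}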
659
660/**
661 * truncate_pagecache - unmap and remove pagecache that has been truncated
662 * @inode: inode
663 * @newsize: new file size
664 *
665 * inode's new i_size must already be written before truncate_pagecache
666 * is called.
667 *
668 * This function should typically be called before the filesystem
669 * releases resources associated with the freed range (eg. deallocates
670 * blocks). This way, pagecache will always stay logically coherent
671 * with on-disk format, and the filesystem would not have to deal with
672 * situations such as writepage being called for a page that has already
673 * had its underlying blocks deallocated.
674 */
675void truncate_pagecache(struct inode *inode, loff_t newsize)
676{
677	struct address_space *mapping = inode->i_mapping;
678	loff_t holebegin = round_up(newsize, PAGE_SIZE);
679
680	/*
681	 * unmap_mapping_range is called twice, first simply for
682	 * efficiency so that truncate_inode_pages does fewer
683	 * single-page unmaps.  However after this first call, and
684	 * before truncate_inode_pages finishes, it is possible for
685	 * private pages to be COWed, which remain after
686	 * truncate_inode_pages finishes, hence the second
687	 * unmap_mapping_range call must be made for correctness.
688	 */
689	unmap_mapping_range(mapping, holebegin, 0, 1);
690	truncate_inode_pages(mapping, newsize);
691	unmap_mapping_range(mapping, holebegin, 0, 1);
692}
693EXPORT_SYMBOL(truncate_pagecache);
694
695/**
696 * truncate_setsize - update inode and pagecache for a new file size
697 * @inode: inode
698 * @newsize: new file size
699 *
700 * truncate_setsize updates i_size and performs pagecache truncation (if
701 * necessary) to @newsize. It is typically called from the filesystem's
702 * setattr function when ATTR_SIZE is passed in.
703 *
704 * Must be called with a lock serializing truncates and writes (generally
705 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
706 * specific block truncation has been performed.
707 */
708void truncate_setsize(struct inode *inode, loff_t newsize)
709{
710	loff_t oldsize = inode->i_size;
711
712	i_size_write(inode, newsize);
713	if (newsize > oldsize)
714		pagecache_isize_extended(inode, oldsize, newsize);
715	truncate_pagecache(inode, newsize);
716}
717EXPORT_SYMBOL(truncate_setsize);
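
A sketch of a minimal ->setattr handler built around truncate_setsize(); the examplefs_ name is hypothetical, while inode_change_ok() and setattr_copy() are the usual generic companions in this kernel version.

static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size)
		truncate_setsize(inode, attr->ia_size);

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}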
718
719/**
720 * pagecache_isize_extended - update pagecache after extension of i_size
721 * @inode:	inode for which i_size was extended
722 * @from:	original inode size
723 * @to:		new inode size
724 *
725 * Handle extension of inode size either caused by extending truncate or by
726 * write starting after current i_size. We mark the page straddling current
727 * i_size RO so that page_mkwrite() is called on the nearest write access to
728 * the page.  This way filesystem can be sure that page_mkwrite() is called on
729 * the page before user writes to the page via mmap after the i_size has been
730 * changed.
731 *
732 * The function must be called after i_size is updated so that page fault
733 * coming after we unlock the page will already see the new i_size.
734 * The function must be called while we still hold i_mutex - this not only
735 * makes sure i_size is stable but also that userspace cannot observe new
736 * i_size value before we are prepared to store mmap writes at new inode size.
737 */
738void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
739{
740	int bsize = 1 << inode->i_blkbits;
741	loff_t rounded_from;
742	struct page *page;
743	pgoff_t index;
744
745	WARN_ON(to > inode->i_size);
746
747	if (from >= to || bsize == PAGE_SIZE)
748		return;
749	/* Page straddling @from will not have any hole block created? */
750	rounded_from = round_up(from, bsize);
751	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
752		return;
753
754	index = from >> PAGE_SHIFT;
755	page = find_lock_page(inode->i_mapping, index);
756	/* Page not cached? Nothing to do */
757	if (!page)
758		return;
759	/*
760	 * See clear_page_dirty_for_io() for details why set_page_dirty()
761	 * is needed.
762	 */
763	if (page_mkclean(page))
764		set_page_dirty(page);
765	unlock_page(page);
766	put_page(page);
767}
768EXPORT_SYMBOL(pagecache_isize_extended);
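
An assumed sketch of an extending-write completion path: the new i_size is published first, then the page straddling the old size is write-protected so a later mmap store goes through page_mkwrite(). The caller is expected to hold i_mutex.

static void example_finish_extending_write(struct inode *inode,
					   loff_t old_size, loff_t new_size)
{
	i_size_write(inode, new_size);		/* new size visible first */
	pagecache_isize_extended(inode, old_size, new_size);
}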
769
770/**
771 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
772 * @inode: inode
773 * @lstart: offset of beginning of hole
774 * @lend: offset of last byte of hole
775 *
776 * This function should typically be called before the filesystem
777 * releases resources associated with the freed range (eg. deallocates
778 * blocks). This way, pagecache will always stay logically coherent
779 * with on-disk format, and the filesystem would not have to deal with
780 * situations such as writepage being called for a page that has already
781 * had its underlying blocks deallocated.
782 */
783void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
784{
785	struct address_space *mapping = inode->i_mapping;
786	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
787	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
788	/*
789	 * This rounding is currently just for example: unmap_mapping_range
790	 * expands its hole outwards, whereas we want it to contract the hole
791	 * inwards.  However, existing callers of truncate_pagecache_range are
792	 * doing their own page rounding first.  Note that unmap_mapping_range
793	 * allows holelen 0 for all, and we allow lend -1 for end of file.
794	 */
795
796	/*
797	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
798	 * once (before truncating pagecache), and without "even_cows" flag:
799	 * hole-punching should not remove private COWed pages from the hole.
800	 */
801	if ((u64)unmap_end > (u64)unmap_start)
802		unmap_mapping_range(mapping, unmap_start,
803				    1 + unmap_end - unmap_start, 0);
804	truncate_inode_pages_range(mapping, lstart, lend);
805}
806EXPORT_SYMBOL(truncate_pagecache_range);
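
Finally, a hedged sketch of the pagecache half of a hole punch over bytes [offset, offset + len); the filesystem would free the on-disk blocks separately, after this returns. The helper name is illustrative.

static void example_punch_hole_pagecache(struct inode *inode,
					 loff_t offset, loff_t len)
{
	/* lend is the last byte of the hole, hence the -1 */
	truncate_pagecache_range(inode, offset, offset + len - 1);
}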