1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/mm/filemap.c
4 *
5 * Copyright (C) 1994-1999 Linus Torvalds
6 */
7
8/*
9 * This file handles the generic file mmap semantics used by
10 * most "normal" filesystems (but you don't /have/ to use this:
11 * the NFS filesystem used to do this differently, for example)
12 */
13#include <linux/export.h>
14#include <linux/compiler.h>
15#include <linux/dax.h>
16#include <linux/fs.h>
17#include <linux/sched/signal.h>
18#include <linux/uaccess.h>
19#include <linux/capability.h>
20#include <linux/kernel_stat.h>
21#include <linux/gfp.h>
22#include <linux/mm.h>
23#include <linux/swap.h>
24#include <linux/mman.h>
25#include <linux/pagemap.h>
26#include <linux/file.h>
27#include <linux/uio.h>
28#include <linux/error-injection.h>
29#include <linux/hash.h>
30#include <linux/writeback.h>
31#include <linux/backing-dev.h>
32#include <linux/pagevec.h>
33#include <linux/blkdev.h>
34#include <linux/security.h>
35#include <linux/cpuset.h>
36#include <linux/hugetlb.h>
37#include <linux/memcontrol.h>
38#include <linux/cleancache.h>
39#include <linux/shmem_fs.h>
40#include <linux/rmap.h>
41#include <linux/delayacct.h>
42#include <linux/psi.h>
43#include <linux/ramfs.h>
44#include <linux/page_idle.h>
45#include <asm/pgalloc.h>
46#include <asm/tlbflush.h>
47#include "internal.h"
48
49#define CREATE_TRACE_POINTS
50#include <trace/events/filemap.h>
51
52/*
53 * FIXME: remove all knowledge of the buffer layer from the core VM
54 */
55#include <linux/buffer_head.h> /* for try_to_free_buffers */
56
57#include <asm/mman.h>
58
59/*
60 * Shared mappings implemented 30.11.1994. It's not fully working yet,
61 * though.
62 *
63 * Shared mappings now work. 15.8.1995 Bruno.
64 *
65 * finished 'unifying' the page and buffer cache and SMP-threaded the
66 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
67 *
68 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
69 */
70
71/*
72 * Lock ordering:
73 *
74 * ->i_mmap_rwsem (truncate_pagecache)
75 * ->private_lock (__free_pte->__set_page_dirty_buffers)
76 * ->swap_lock (exclusive_swap_page, others)
77 * ->i_pages lock
78 *
79 * ->i_mutex
80 * ->i_mmap_rwsem (truncate->unmap_mapping_range)
81 *
82 * ->mmap_lock
83 * ->i_mmap_rwsem
84 * ->page_table_lock or pte_lock (various, mainly in memory.c)
85 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
86 *
87 * ->mmap_lock
88 * ->lock_page (access_process_vm)
89 *
90 * ->i_mutex (generic_perform_write)
91 * ->mmap_lock (fault_in_pages_readable->do_page_fault)
92 *
93 * bdi->wb.list_lock
94 * sb_lock (fs/fs-writeback.c)
95 * ->i_pages lock (__sync_single_inode)
96 *
97 * ->i_mmap_rwsem
98 * ->anon_vma.lock (vma_adjust)
99 *
100 * ->anon_vma.lock
101 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
102 *
103 * ->page_table_lock or pte_lock
104 * ->swap_lock (try_to_unmap_one)
105 * ->private_lock (try_to_unmap_one)
106 * ->i_pages lock (try_to_unmap_one)
107 * ->lruvec->lru_lock (follow_page->mark_page_accessed)
108 * ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
109 * ->private_lock (page_remove_rmap->set_page_dirty)
110 * ->i_pages lock (page_remove_rmap->set_page_dirty)
111 * bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
112 * ->inode->i_lock (page_remove_rmap->set_page_dirty)
113 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
114 * bdi.wb->list_lock (zap_pte_range->set_page_dirty)
115 * ->inode->i_lock (zap_pte_range->set_page_dirty)
116 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
117 *
118 * ->i_mmap_rwsem
119 * ->tasklist_lock (memory_failure, collect_procs_ao)
120 */
121
122static void page_cache_delete(struct address_space *mapping,
123 struct page *page, void *shadow)
124{
125 XA_STATE(xas, &mapping->i_pages, page->index);
126 unsigned int nr = 1;
127
128 mapping_set_update(&xas, mapping);
129
130 /* hugetlb pages are represented by a single entry in the xarray */
131 if (!PageHuge(page)) {
132 xas_set_order(&xas, page->index, compound_order(page));
133 nr = compound_nr(page);
134 }
135
136 VM_BUG_ON_PAGE(!PageLocked(page), page);
137 VM_BUG_ON_PAGE(PageTail(page), page);
138 VM_BUG_ON_PAGE(nr != 1 && shadow, page);
139
140 xas_store(&xas, shadow);
141 xas_init_marks(&xas);
142
143 page->mapping = NULL;
144 /* Leave page->index set: truncation lookup relies upon it */
145 mapping->nrpages -= nr;
146}
147
148static void unaccount_page_cache_page(struct address_space *mapping,
149 struct page *page)
150{
151 int nr;
152
153 /*
154 * if we're uptodate, flush out into the cleancache, otherwise
155 * invalidate any existing cleancache entries. We can't leave
156 * stale data around in the cleancache once our page is gone
157 */
158 if (PageUptodate(page) && PageMappedToDisk(page))
159 cleancache_put_page(page);
160 else
161 cleancache_invalidate_page(mapping, page);
162
163 VM_BUG_ON_PAGE(PageTail(page), page);
164 VM_BUG_ON_PAGE(page_mapped(page), page);
165 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
166 int mapcount;
167
168 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
169 current->comm, page_to_pfn(page));
170 dump_page(page, "still mapped when deleted");
171 dump_stack();
172 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
173
174 mapcount = page_mapcount(page);
175 if (mapping_exiting(mapping) &&
176 page_count(page) >= mapcount + 2) {
177 /*
178 * All vmas have already been torn down, so it's
179 * a good bet that actually the page is unmapped,
180 * and we'd prefer not to leak it: if we're wrong,
181 * some other bad page check should catch it later.
182 */
183 page_mapcount_reset(page);
184 page_ref_sub(page, mapcount);
185 }
186 }
187
188 /* hugetlb pages do not participate in page cache accounting. */
189 if (PageHuge(page))
190 return;
191
192 nr = thp_nr_pages(page);
193
194 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
195 if (PageSwapBacked(page)) {
196 __mod_lruvec_page_state(page, NR_SHMEM, -nr);
197 if (PageTransHuge(page))
198 __mod_lruvec_page_state(page, NR_SHMEM_THPS, -nr);
199 } else if (PageTransHuge(page)) {
200 __mod_lruvec_page_state(page, NR_FILE_THPS, -nr);
201 filemap_nr_thps_dec(mapping);
202 }
203
204 /*
205 * At this point page must be either written or cleaned by
206 * truncate. Dirty page here signals a bug and loss of
207 * unwritten data.
208 *
209 * This fixes dirty accounting after removing the page entirely
210 * but leaves PageDirty set: it has no effect for truncated
211 * page and anyway will be cleared before returning page into
212 * buddy allocator.
213 */
214 if (WARN_ON_ONCE(PageDirty(page)))
215 account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
216}
217
218/*
219 * Delete a page from the page cache and free it. Caller has to make
220 * sure the page is locked and that nobody else uses it - or that usage
221 * is safe. The caller must hold the i_pages lock.
222 */
223void __delete_from_page_cache(struct page *page, void *shadow)
224{
225 struct address_space *mapping = page->mapping;
226
227 trace_mm_filemap_delete_from_page_cache(page);
228
229 unaccount_page_cache_page(mapping, page);
230 page_cache_delete(mapping, page, shadow);
231}
232
233static void page_cache_free_page(struct address_space *mapping,
234 struct page *page)
235{
236 void (*freepage)(struct page *);
237
238 freepage = mapping->a_ops->freepage;
239 if (freepage)
240 freepage(page);
241
242 if (PageTransHuge(page) && !PageHuge(page)) {
243 page_ref_sub(page, thp_nr_pages(page));
244 VM_BUG_ON_PAGE(page_count(page) <= 0, page);
245 } else {
246 put_page(page);
247 }
248}
249
250/**
251 * delete_from_page_cache - delete page from page cache
252 * @page: the page which the kernel is trying to remove from page cache
253 *
254 * This must be called only on pages that have been verified to be in the page
255 * cache and locked. It will never put the page into the free list, the caller
256 * has a reference on the page.
257 */
258void delete_from_page_cache(struct page *page)
259{
260 struct address_space *mapping = page_mapping(page);
261 unsigned long flags;
262
263 BUG_ON(!PageLocked(page));
264 xa_lock_irqsave(&mapping->i_pages, flags);
265 __delete_from_page_cache(page, NULL);
266 xa_unlock_irqrestore(&mapping->i_pages, flags);
267
268 page_cache_free_page(mapping, page);
269}
270EXPORT_SYMBOL(delete_from_page_cache);
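/*
 * Illustrative sketch (not part of filemap.c): a typical caller pattern for
 * delete_from_page_cache().  The caller holds its own reference and the page
 * lock, and re-checks ->mapping in case the page was truncated while it was
 * unlocked.  The surrounding names are hypothetical.
 *
 *	lock_page(page);
 *	if (page->mapping == mapping)
 *		delete_from_page_cache(page);
 *	unlock_page(page);
 *	put_page(page);		(drop the caller's own reference)
 */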
271
272/*
273 * page_cache_delete_batch - delete several pages from page cache
274 * @mapping: the mapping to which pages belong
275 * @pvec: pagevec with pages to delete
276 *
277 * The function walks over mapping->i_pages and removes pages passed in @pvec
278 * from the mapping. The function expects @pvec to be sorted by page index
279 * and is optimised for it to be dense.
280 * It tolerates holes in @pvec (mapping entries at those indices are not
281 * modified). The function expects only THP head pages to be present in the
282 * @pvec.
283 *
284 * The function expects the i_pages lock to be held.
285 */
286static void page_cache_delete_batch(struct address_space *mapping,
287 struct pagevec *pvec)
288{
289 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
290 int total_pages = 0;
291 int i = 0;
292 struct page *page;
293
294 mapping_set_update(&xas, mapping);
295 xas_for_each(&xas, page, ULONG_MAX) {
296 if (i >= pagevec_count(pvec))
297 break;
298
299 /* A swap/dax/shadow entry got inserted? Skip it. */
300 if (xa_is_value(page))
301 continue;
302 /*
303 * A page got inserted in our range? Skip it. We have our
304 * pages locked so they are protected from being removed.
305 * If we see a page whose index is higher than ours, it
306 * means our page has been removed, which shouldn't be
307 * possible because we're holding the PageLock.
308 */
309 if (page != pvec->pages[i]) {
310 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
311 page);
312 continue;
313 }
314
315 WARN_ON_ONCE(!PageLocked(page));
316
317 if (page->index == xas.xa_index)
318 page->mapping = NULL;
319 /* Leave page->index set: truncation lookup relies on it */
320
321 /*
322 * Move to the next page in the vector if this is a regular
323 * page or the index is of the last sub-page of this compound
324 * page.
325 */
326 if (page->index + compound_nr(page) - 1 == xas.xa_index)
327 i++;
328 xas_store(&xas, NULL);
329 total_pages++;
330 }
331 mapping->nrpages -= total_pages;
332}
333
334void delete_from_page_cache_batch(struct address_space *mapping,
335 struct pagevec *pvec)
336{
337 int i;
338 unsigned long flags;
339
340 if (!pagevec_count(pvec))
341 return;
342
343 xa_lock_irqsave(&mapping->i_pages, flags);
344 for (i = 0; i < pagevec_count(pvec); i++) {
345 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
346
347 unaccount_page_cache_page(mapping, pvec->pages[i]);
348 }
349 page_cache_delete_batch(mapping, pvec);
350 xa_unlock_irqrestore(&mapping->i_pages, flags);
351
352 for (i = 0; i < pagevec_count(pvec); i++)
353 page_cache_free_page(mapping, pvec->pages[i]);
354}
355
356int filemap_check_errors(struct address_space *mapping)
357{
358 int ret = 0;
359 /* Check for outstanding write errors */
360 if (test_bit(AS_ENOSPC, &mapping->flags) &&
361 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
362 ret = -ENOSPC;
363 if (test_bit(AS_EIO, &mapping->flags) &&
364 test_and_clear_bit(AS_EIO, &mapping->flags))
365 ret = -EIO;
366 return ret;
367}
368EXPORT_SYMBOL(filemap_check_errors);
369
370static int filemap_check_and_keep_errors(struct address_space *mapping)
371{
372 /* Check for outstanding write errors */
373 if (test_bit(AS_EIO, &mapping->flags))
374 return -EIO;
375 if (test_bit(AS_ENOSPC, &mapping->flags))
376 return -ENOSPC;
377 return 0;
378}
379
380/**
381 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
382 * @mapping: address space structure to write
383 * @start: offset in bytes where the range starts
384 * @end: offset in bytes where the range ends (inclusive)
385 * @sync_mode: enable synchronous operation
386 *
387 * Start writeback against all of a mapping's dirty pages that lie
388 * within the byte offsets <start, end> inclusive.
389 *
390 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
391 * opposed to a regular memory cleansing writeback. The difference between
392 * these two operations is that if a dirty page/buffer is encountered, it must
393 * be waited upon, and not just skipped over.
394 *
395 * Return: %0 on success, negative error code otherwise.
396 */
397int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
398 loff_t end, int sync_mode)
399{
400 int ret;
401 struct writeback_control wbc = {
402 .sync_mode = sync_mode,
403 .nr_to_write = LONG_MAX,
404 .range_start = start,
405 .range_end = end,
406 };
407
408 if (!mapping_can_writeback(mapping) ||
409 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
410 return 0;
411
412 wbc_attach_fdatawrite_inode(&wbc, mapping->host);
413 ret = do_writepages(mapping, &wbc);
414 wbc_detach_inode(&wbc);
415 return ret;
416}
417
418static inline int __filemap_fdatawrite(struct address_space *mapping,
419 int sync_mode)
420{
421 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
422}
423
424int filemap_fdatawrite(struct address_space *mapping)
425{
426 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
427}
428EXPORT_SYMBOL(filemap_fdatawrite);
429
430int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
431 loff_t end)
432{
433 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
434}
435EXPORT_SYMBOL(filemap_fdatawrite_range);
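/*
 * Illustrative sketch (not part of filemap.c): pairing the write and wait
 * halves by hand.  This is roughly what filemap_write_and_wait_range() does
 * for you; the variable names here are hypothetical.
 *
 *	err = filemap_fdatawrite_range(mapping, pos, pos + len - 1);
 *	if (!err)
 *		err = filemap_fdatawait_range(mapping, pos, pos + len - 1);
 */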
436
437/**
438 * filemap_flush - mostly a non-blocking flush
439 * @mapping: target address_space
440 *
441 * This is a mostly non-blocking flush. Not suitable for data-integrity
442 * purposes - I/O may not be started against all dirty pages.
443 *
444 * Return: %0 on success, negative error code otherwise.
445 */
446int filemap_flush(struct address_space *mapping)
447{
448 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
449}
450EXPORT_SYMBOL(filemap_flush);
451
452/**
453 * filemap_range_has_page - check if a page exists in range.
454 * @mapping: address space within which to check
455 * @start_byte: offset in bytes where the range starts
456 * @end_byte: offset in bytes where the range ends (inclusive)
457 *
458 * Find at least one page in the range supplied, usually used to check if
459 * direct writing in this range will trigger a writeback.
460 *
461 * Return: %true if at least one page exists in the specified range,
462 * %false otherwise.
463 */
464bool filemap_range_has_page(struct address_space *mapping,
465 loff_t start_byte, loff_t end_byte)
466{
467 struct page *page;
468 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
469 pgoff_t max = end_byte >> PAGE_SHIFT;
470
471 if (end_byte < start_byte)
472 return false;
473
474 rcu_read_lock();
475 for (;;) {
476 page = xas_find(&xas, max);
477 if (xas_retry(&xas, page))
478 continue;
479 /* Shadow entries don't count */
480 if (xa_is_value(page))
481 continue;
482 /*
483 * We don't need to try to pin this page; we're about to
484 * release the RCU lock anyway. It is enough to know that
485 * there was a page here recently.
486 */
487 break;
488 }
489 rcu_read_unlock();
490
491 return page != NULL;
492}
493EXPORT_SYMBOL(filemap_range_has_page);
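/*
 * Illustrative sketch (not part of filemap.c): a non-blocking direct-I/O
 * writer can use this check to bail out instead of waiting for page cache
 * invalidation.  This is a sketch of the idea, not the exact generic write
 * path.
 *
 *	if (iocb->ki_flags & IOCB_NOWAIT) {
 *		if (filemap_range_has_page(mapping, pos, pos + count - 1))
 *			return -EAGAIN;
 *	}
 */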
494
495static void __filemap_fdatawait_range(struct address_space *mapping,
496 loff_t start_byte, loff_t end_byte)
497{
498 pgoff_t index = start_byte >> PAGE_SHIFT;
499 pgoff_t end = end_byte >> PAGE_SHIFT;
500 struct pagevec pvec;
501 int nr_pages;
502
503 if (end_byte < start_byte)
504 return;
505
506 pagevec_init(&pvec);
507 while (index <= end) {
508 unsigned i;
509
510 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
511 end, PAGECACHE_TAG_WRITEBACK);
512 if (!nr_pages)
513 break;
514
515 for (i = 0; i < nr_pages; i++) {
516 struct page *page = pvec.pages[i];
517
518 wait_on_page_writeback(page);
519 ClearPageError(page);
520 }
521 pagevec_release(&pvec);
522 cond_resched();
523 }
524}
525
526/**
527 * filemap_fdatawait_range - wait for writeback to complete
528 * @mapping: address space structure to wait for
529 * @start_byte: offset in bytes where the range starts
530 * @end_byte: offset in bytes where the range ends (inclusive)
531 *
532 * Walk the list of under-writeback pages of the given address space
533 * in the given range and wait for all of them. Check error status of
534 * the address space and return it.
535 *
536 * Since the error status of the address space is cleared by this function,
537 * callers are responsible for checking the return value and handling and/or
538 * reporting the error.
539 *
540 * Return: error status of the address space.
541 */
542int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
543 loff_t end_byte)
544{
545 __filemap_fdatawait_range(mapping, start_byte, end_byte);
546 return filemap_check_errors(mapping);
547}
548EXPORT_SYMBOL(filemap_fdatawait_range);
549
550/**
551 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
552 * @mapping: address space structure to wait for
553 * @start_byte: offset in bytes where the range starts
554 * @end_byte: offset in bytes where the range ends (inclusive)
555 *
556 * Walk the list of under-writeback pages of the given address space in the
557 * given range and wait for all of them. Unlike filemap_fdatawait_range(),
558 * this function does not clear error status of the address space.
559 *
560 * Use this function if callers don't handle errors themselves. Expected
561 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
562 * fsfreeze(8)
563 */
564int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
565 loff_t start_byte, loff_t end_byte)
566{
567 __filemap_fdatawait_range(mapping, start_byte, end_byte);
568 return filemap_check_and_keep_errors(mapping);
569}
570EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
571
572/**
573 * file_fdatawait_range - wait for writeback to complete
574 * @file: file pointing to address space structure to wait for
575 * @start_byte: offset in bytes where the range starts
576 * @end_byte: offset in bytes where the range ends (inclusive)
577 *
578 * Walk the list of under-writeback pages of the address space that file
579 * refers to, in the given range and wait for all of them. Check error
580 * status of the address space vs. the file->f_wb_err cursor and return it.
581 *
582 * Since the error status of the file is advanced by this function,
583 * callers are responsible for checking the return value and handling and/or
584 * reporting the error.
585 *
586 * Return: error status of the address space vs. the file->f_wb_err cursor.
587 */
588int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
589{
590 struct address_space *mapping = file->f_mapping;
591
592 __filemap_fdatawait_range(mapping, start_byte, end_byte);
593 return file_check_and_advance_wb_err(file);
594}
595EXPORT_SYMBOL(file_fdatawait_range);
596
597/**
598 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
599 * @mapping: address space structure to wait for
600 *
601 * Walk the list of under-writeback pages of the given address space
602 * and wait for all of them. Unlike filemap_fdatawait(), this function
603 * does not clear error status of the address space.
604 *
605 * Use this function if callers don't handle errors themselves. Expected
606 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
607 * fsfreeze(8)
608 *
609 * Return: error status of the address space.
610 */
611int filemap_fdatawait_keep_errors(struct address_space *mapping)
612{
613 __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
614 return filemap_check_and_keep_errors(mapping);
615}
616EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
617
618/* Returns true if writeback might be needed or already in progress. */
619static bool mapping_needs_writeback(struct address_space *mapping)
620{
621 return mapping->nrpages;
622}
623
624/**
625 * filemap_range_needs_writeback - check if range potentially needs writeback
626 * @mapping: address space within which to check
627 * @start_byte: offset in bytes where the range starts
628 * @end_byte: offset in bytes where the range ends (inclusive)
629 *
630 * Find at least one page in the range supplied, usually used to check if
631 * direct writing in this range will trigger a writeback. Used by O_DIRECT
632 * read/write with IOCB_NOWAIT, to see if the caller needs to do
633 * filemap_write_and_wait_range() before proceeding.
634 *
635 * Return: %true if the caller should do filemap_write_and_wait_range() before
636 * doing O_DIRECT to a page in this range, %false otherwise.
637 */
638bool filemap_range_needs_writeback(struct address_space *mapping,
639 loff_t start_byte, loff_t end_byte)
640{
641 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
642 pgoff_t max = end_byte >> PAGE_SHIFT;
643 struct page *page;
644
645 if (!mapping_needs_writeback(mapping))
646 return false;
647 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
648 !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
649 return false;
650 if (end_byte < start_byte)
651 return false;
652
653 rcu_read_lock();
654 xas_for_each(&xas, page, max) {
655 if (xas_retry(&xas, page))
656 continue;
657 if (xa_is_value(page))
658 continue;
659 if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
660 break;
661 }
662 rcu_read_unlock();
663 return page != NULL;
664}
665EXPORT_SYMBOL_GPL(filemap_range_needs_writeback);
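/*
 * Illustrative sketch (not part of filemap.c): the IOCB_NOWAIT pattern the
 * comment above describes.  A direct-I/O read that must not block probes the
 * range first; a blocking caller falls back to write-and-wait.  Sketch only,
 * variable names hypothetical.
 *
 *	if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
 *					  iocb->ki_pos + count - 1)) {
 *		if (iocb->ki_flags & IOCB_NOWAIT)
 *			return -EAGAIN;
 *		error = filemap_write_and_wait_range(mapping, iocb->ki_pos,
 *						     iocb->ki_pos + count - 1);
 *		if (error)
 *			return error;
 *	}
 */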
666
667/**
668 * filemap_write_and_wait_range - write out & wait on a file range
669 * @mapping: the address_space for the pages
670 * @lstart: offset in bytes where the range starts
671 * @lend: offset in bytes where the range ends (inclusive)
672 *
673 * Write out and wait upon file offsets lstart->lend, inclusive.
674 *
675 * Note that @lend is inclusive (describes the last byte to be written) so
676 * that this function can be used to write to the very end-of-file (end = -1).
677 *
678 * Return: error status of the address space.
679 */
680int filemap_write_and_wait_range(struct address_space *mapping,
681 loff_t lstart, loff_t lend)
682{
683 int err = 0;
684
685 if (mapping_needs_writeback(mapping)) {
686 err = __filemap_fdatawrite_range(mapping, lstart, lend,
687 WB_SYNC_ALL);
688 /*
689 * Even if the above returned error, the pages may be
690 * written partially (e.g. -ENOSPC), so we wait for it.
691 * But the -EIO is special case, it may indicate the worst
692 * thing (e.g. bug) happened, so we avoid waiting for it.
693 */
694 if (err != -EIO) {
695 int err2 = filemap_fdatawait_range(mapping,
696 lstart, lend);
697 if (!err)
698 err = err2;
699 } else {
700 /* Clear any previously stored errors */
701 filemap_check_errors(mapping);
702 }
703 } else {
704 err = filemap_check_errors(mapping);
705 }
706 return err;
707}
708EXPORT_SYMBOL(filemap_write_and_wait_range);
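/*
 * Illustrative sketch (not part of filemap.c): the classic direct-write
 * preparation step: flush and wait on the range, then invalidate the
 * now-clean cached pages so the direct I/O does not race with stale cache.
 * Variable names are hypothetical.
 *
 *	written = filemap_write_and_wait_range(mapping, pos, end);
 *	if (written)
 *		goto out;
 *	written = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 *						end >> PAGE_SHIFT);
 */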
709
710void __filemap_set_wb_err(struct address_space *mapping, int err)
711{
712 errseq_t eseq = errseq_set(&mapping->wb_err, err);
713
714 trace_filemap_set_wb_err(mapping, eseq);
715}
716EXPORT_SYMBOL(__filemap_set_wb_err);
717
718/**
 719 * file_check_and_advance_wb_err - report any wb error previously seen on the
 720 * file and advance wb_err to the current one
721 * @file: struct file on which the error is being reported
722 *
723 * When userland calls fsync (or something like nfsd does the equivalent), we
724 * want to report any writeback errors that occurred since the last fsync (or
725 * since the file was opened if there haven't been any).
726 *
727 * Grab the wb_err from the mapping. If it matches what we have in the file,
728 * then just quickly return 0. The file is all caught up.
729 *
730 * If it doesn't match, then take the mapping value, set the "seen" flag in
731 * it and try to swap it into place. If it works, or another task beat us
732 * to it with the new value, then update the f_wb_err and return the error
733 * portion. The error at this point must be reported via proper channels
 734 * (a la fsync, or an NFS COMMIT operation, etc.).
735 *
736 * While we handle mapping->wb_err with atomic operations, the f_wb_err
737 * value is protected by the f_lock since we must ensure that it reflects
738 * the latest value swapped in for this file descriptor.
739 *
740 * Return: %0 on success, negative error code otherwise.
741 */
742int file_check_and_advance_wb_err(struct file *file)
743{
744 int err = 0;
745 errseq_t old = READ_ONCE(file->f_wb_err);
746 struct address_space *mapping = file->f_mapping;
747
748 /* Locklessly handle the common case where nothing has changed */
749 if (errseq_check(&mapping->wb_err, old)) {
750 /* Something changed, must use slow path */
751 spin_lock(&file->f_lock);
752 old = file->f_wb_err;
753 err = errseq_check_and_advance(&mapping->wb_err,
754 &file->f_wb_err);
755 trace_file_check_and_advance_wb_err(file, old);
756 spin_unlock(&file->f_lock);
757 }
758
759 /*
 760 * We're mostly using this function as a drop-in replacement for
761 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
762 * that the legacy code would have had on these flags.
763 */
764 clear_bit(AS_EIO, &mapping->flags);
765 clear_bit(AS_ENOSPC, &mapping->flags);
766 return err;
767}
768EXPORT_SYMBOL(file_check_and_advance_wb_err);
769
770/**
771 * file_write_and_wait_range - write out & wait on a file range
772 * @file: file pointing to address_space with pages
773 * @lstart: offset in bytes where the range starts
774 * @lend: offset in bytes where the range ends (inclusive)
775 *
776 * Write out and wait upon file offsets lstart->lend, inclusive.
777 *
778 * Note that @lend is inclusive (describes the last byte to be written) so
779 * that this function can be used to write to the very end-of-file (end = -1).
780 *
781 * After writing out and waiting on the data, we check and advance the
782 * f_wb_err cursor to the latest value, and return any errors detected there.
783 *
784 * Return: %0 on success, negative error code otherwise.
785 */
786int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
787{
788 int err = 0, err2;
789 struct address_space *mapping = file->f_mapping;
790
791 if (mapping_needs_writeback(mapping)) {
792 err = __filemap_fdatawrite_range(mapping, lstart, lend,
793 WB_SYNC_ALL);
794 /* See comment of filemap_write_and_wait() */
795 if (err != -EIO)
796 __filemap_fdatawait_range(mapping, lstart, lend);
797 }
798 err2 = file_check_and_advance_wb_err(file);
799 if (!err)
800 err = err2;
801 return err;
802}
803EXPORT_SYMBOL(file_write_and_wait_range);
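/*
 * Illustrative sketch (not part of filemap.c): a minimal ->fsync() built on
 * this helper.  A real implementation would also write back inode metadata or
 * commit its journal where the placeholder line sits; the function name is
 * hypothetical.
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		int err = file_write_and_wait_range(file, start, end);
 *
 *		if (err)
 *			return err;
 *		... write back inode metadata / commit the journal ...
 *		return 0;
 *	}
 */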
804
805/**
806 * replace_page_cache_page - replace a pagecache page with a new one
807 * @old: page to be replaced
808 * @new: page to replace with
809 *
810 * This function replaces a page in the pagecache with a new one. On
811 * success it acquires the pagecache reference for the new page and
812 * drops it for the old page. Both the old and new pages must be
813 * locked. This function does not add the new page to the LRU, the
814 * caller must do that.
815 *
816 * The remove + add is atomic. This function cannot fail.
817 */
818void replace_page_cache_page(struct page *old, struct page *new)
819{
820 struct address_space *mapping = old->mapping;
821 void (*freepage)(struct page *) = mapping->a_ops->freepage;
822 pgoff_t offset = old->index;
823 XA_STATE(xas, &mapping->i_pages, offset);
824 unsigned long flags;
825
826 VM_BUG_ON_PAGE(!PageLocked(old), old);
827 VM_BUG_ON_PAGE(!PageLocked(new), new);
828 VM_BUG_ON_PAGE(new->mapping, new);
829
830 get_page(new);
831 new->mapping = mapping;
832 new->index = offset;
833
834 mem_cgroup_migrate(old, new);
835
836 xas_lock_irqsave(&xas, flags);
837 xas_store(&xas, new);
838
839 old->mapping = NULL;
840 /* hugetlb pages do not participate in page cache accounting. */
841 if (!PageHuge(old))
842 __dec_lruvec_page_state(old, NR_FILE_PAGES);
843 if (!PageHuge(new))
844 __inc_lruvec_page_state(new, NR_FILE_PAGES);
845 if (PageSwapBacked(old))
846 __dec_lruvec_page_state(old, NR_SHMEM);
847 if (PageSwapBacked(new))
848 __inc_lruvec_page_state(new, NR_SHMEM);
849 xas_unlock_irqrestore(&xas, flags);
850 if (freepage)
851 freepage(old);
852 put_page(old);
853}
854EXPORT_SYMBOL_GPL(replace_page_cache_page);
855
856noinline int __add_to_page_cache_locked(struct page *page,
857 struct address_space *mapping,
858 pgoff_t offset, gfp_t gfp,
859 void **shadowp)
860{
861 XA_STATE(xas, &mapping->i_pages, offset);
862 int huge = PageHuge(page);
863 int error;
864 bool charged = false;
865
866 VM_BUG_ON_PAGE(!PageLocked(page), page);
867 VM_BUG_ON_PAGE(PageSwapBacked(page), page);
868 mapping_set_update(&xas, mapping);
869
870 get_page(page);
871 page->mapping = mapping;
872 page->index = offset;
873
874 if (!huge) {
875 error = mem_cgroup_charge(page, NULL, gfp);
876 if (error)
877 goto error;
878 charged = true;
879 }
880
881 gfp &= GFP_RECLAIM_MASK;
882
883 do {
884 unsigned int order = xa_get_order(xas.xa, xas.xa_index);
885 void *entry, *old = NULL;
886
887 if (order > thp_order(page))
888 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
889 order, gfp);
890 xas_lock_irq(&xas);
891 xas_for_each_conflict(&xas, entry) {
892 old = entry;
893 if (!xa_is_value(entry)) {
894 xas_set_err(&xas, -EEXIST);
895 goto unlock;
896 }
897 }
898
899 if (old) {
900 if (shadowp)
901 *shadowp = old;
902 /* entry may have been split before we acquired lock */
903 order = xa_get_order(xas.xa, xas.xa_index);
904 if (order > thp_order(page)) {
905 xas_split(&xas, old, order);
906 xas_reset(&xas);
907 }
908 }
909
910 xas_store(&xas, page);
911 if (xas_error(&xas))
912 goto unlock;
913
914 mapping->nrpages++;
915
916 /* hugetlb pages do not participate in page cache accounting */
917 if (!huge)
918 __inc_lruvec_page_state(page, NR_FILE_PAGES);
919unlock:
920 xas_unlock_irq(&xas);
921 } while (xas_nomem(&xas, gfp));
922
923 if (xas_error(&xas)) {
924 error = xas_error(&xas);
925 if (charged)
926 mem_cgroup_uncharge(page);
927 goto error;
928 }
929
930 trace_mm_filemap_add_to_page_cache(page);
931 return 0;
932error:
933 page->mapping = NULL;
934 /* Leave page->index set: truncation relies upon it */
935 put_page(page);
936 return error;
937}
938ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
939
940/**
941 * add_to_page_cache_locked - add a locked page to the pagecache
942 * @page: page to add
943 * @mapping: the page's address_space
944 * @offset: page index
945 * @gfp_mask: page allocation mode
946 *
947 * This function is used to add a page to the pagecache. It must be locked.
948 * This function does not add the page to the LRU. The caller must do that.
949 *
950 * Return: %0 on success, negative error code otherwise.
951 */
952int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
953 pgoff_t offset, gfp_t gfp_mask)
954{
955 return __add_to_page_cache_locked(page, mapping, offset,
956 gfp_mask, NULL);
957}
958EXPORT_SYMBOL(add_to_page_cache_locked);
959
960int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
961 pgoff_t offset, gfp_t gfp_mask)
962{
963 void *shadow = NULL;
964 int ret;
965
966 __SetPageLocked(page);
967 ret = __add_to_page_cache_locked(page, mapping, offset,
968 gfp_mask, &shadow);
969 if (unlikely(ret))
970 __ClearPageLocked(page);
971 else {
972 /*
973 * The page might have been evicted from cache only
974 * recently, in which case it should be activated like
975 * any other repeatedly accessed page.
976 * The exception is pages getting rewritten; evicting other
977 * data from the working set, only to cache data that will
978 * get overwritten with something else, is a waste of memory.
979 */
980 WARN_ON_ONCE(PageActive(page));
981 if (!(gfp_mask & __GFP_WRITE) && shadow)
982 workingset_refault(page, shadow);
983 lru_cache_add(page);
984 }
985 return ret;
986}
987EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
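/*
 * Illustrative sketch (not part of filemap.c): the usual allocate-then-insert
 * dance.  On -EEXIST somebody else added a page at @index first, so the
 * caller drops its page and goes back to look the winner up.  Sketch only,
 * not a real call site.
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (err) {
 *		put_page(page);
 *		if (err == -EEXIST)
 *			goto repeat;
 *		return err;
 *	}
 */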
988
989#ifdef CONFIG_NUMA
990struct page *__page_cache_alloc(gfp_t gfp)
991{
992 int n;
993 struct page *page;
994
995 if (cpuset_do_page_mem_spread()) {
996 unsigned int cpuset_mems_cookie;
997 do {
998 cpuset_mems_cookie = read_mems_allowed_begin();
999 n = cpuset_mem_spread_node();
1000 page = __alloc_pages_node(n, gfp, 0);
1001 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
1002
1003 return page;
1004 }
1005 return alloc_pages(gfp, 0);
1006}
1007EXPORT_SYMBOL(__page_cache_alloc);
1008#endif
1009
1010/*
1011 * In order to wait for pages to become available there must be
1012 * waitqueues associated with pages. By using a hash table of
1013 * waitqueues where the bucket discipline is to maintain all
1014 * waiters on the same queue and wake all when any of the pages
1015 * become available, and for the woken contexts to check to be
1016 * sure the appropriate page became available, this saves space
1017 * at a cost of "thundering herd" phenomena during rare hash
1018 * collisions.
1019 */
1020#define PAGE_WAIT_TABLE_BITS 8
1021#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1022static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1023
1024static wait_queue_head_t *page_waitqueue(struct page *page)
1025{
1026 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
1027}
1028
1029void __init pagecache_init(void)
1030{
1031 int i;
1032
1033 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1034 init_waitqueue_head(&page_wait_table[i]);
1035
1036 page_writeback_init();
1037}
1038
1039/*
1040 * The page wait code treats the "wait->flags" somewhat unusually, because
1041 * we have multiple different kinds of waits, not just the usual "exclusive"
1042 * one.
1043 *
1044 * We have:
1045 *
1046 * (a) no special bits set:
1047 *
1048 * We're just waiting for the bit to be released, and when a waker
1049 * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1050 * and remove it from the wait queue.
1051 *
1052 * Simple and straightforward.
1053 *
1054 * (b) WQ_FLAG_EXCLUSIVE:
1055 *
1056 * The waiter is waiting to get the lock, and only one waiter should
1057 * be woken up to avoid any thundering herd behavior. We'll set the
1058 * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1059 *
1060 * This is the traditional exclusive wait.
1061 *
1062 * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1063 *
1064 * The waiter is waiting to get the bit, and additionally wants the
1065 * lock to be transferred to it for fair lock behavior. If the lock
1066 * cannot be taken, we stop walking the wait queue without waking
1067 * the waiter.
1068 *
1069 * This is the "fair lock handoff" case, and in addition to setting
1070 * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1071 * that it now has the lock.
1072 */
1073static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1074{
1075 unsigned int flags;
1076 struct wait_page_key *key = arg;
1077 struct wait_page_queue *wait_page
1078 = container_of(wait, struct wait_page_queue, wait);
1079
1080 if (!wake_page_match(wait_page, key))
1081 return 0;
1082
1083 /*
1084 * If it's a lock handoff wait, we get the bit for it, and
1085 * stop walking (and do not wake it up) if we can't.
1086 */
1087 flags = wait->flags;
1088 if (flags & WQ_FLAG_EXCLUSIVE) {
1089 if (test_bit(key->bit_nr, &key->page->flags))
1090 return -1;
1091 if (flags & WQ_FLAG_CUSTOM) {
1092 if (test_and_set_bit(key->bit_nr, &key->page->flags))
1093 return -1;
1094 flags |= WQ_FLAG_DONE;
1095 }
1096 }
1097
1098 /*
1099 * We are holding the wait-queue lock, but the waiter that
1100 * is waiting for this will be checking the flags without
1101 * any locking.
1102 *
1103 * So update the flags atomically, and wake up the waiter
1104 * afterwards to avoid any races. This store-release pairs
1105 * with the load-acquire in wait_on_page_bit_common().
1106 */
1107 smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1108 wake_up_state(wait->private, mode);
1109
1110 /*
1111 * Ok, we have successfully done what we're waiting for,
1112 * and we can unconditionally remove the wait entry.
1113 *
1114 * Note that this pairs with the "finish_wait()" in the
1115 * waiter, and has to be the absolute last thing we do.
1116 * After this list_del_init(&wait->entry) the wait entry
1117 * might be de-allocated and the process might even have
1118 * exited.
1119 */
1120 list_del_init_careful(&wait->entry);
1121 return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1122}
1123
1124static void wake_up_page_bit(struct page *page, int bit_nr)
1125{
1126 wait_queue_head_t *q = page_waitqueue(page);
1127 struct wait_page_key key;
1128 unsigned long flags;
1129 wait_queue_entry_t bookmark;
1130
1131 key.page = page;
1132 key.bit_nr = bit_nr;
1133 key.page_match = 0;
1134
1135 bookmark.flags = 0;
1136 bookmark.private = NULL;
1137 bookmark.func = NULL;
1138 INIT_LIST_HEAD(&bookmark.entry);
1139
1140 spin_lock_irqsave(&q->lock, flags);
1141 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1142
1143 while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1144 /*
1145 * Take a breather from holding the lock,
1146 * allow pages that finish wake up asynchronously
1147 * to acquire the lock and remove themselves
1148 * from wait queue
1149 */
1150 spin_unlock_irqrestore(&q->lock, flags);
1151 cpu_relax();
1152 spin_lock_irqsave(&q->lock, flags);
1153 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1154 }
1155
1156 /*
1157 * It is possible for other pages to have collided on the waitqueue
 1158 * hash, so in that case check for a page match. That prevents a long-
 1159 * term waiter for another page from keeping PageWaiters set on this one.
1160 *
1161 * It is still possible to miss a case here, when we woke page waiters
1162 * and removed them from the waitqueue, but there are still other
1163 * page waiters.
1164 */
1165 if (!waitqueue_active(q) || !key.page_match) {
1166 ClearPageWaiters(page);
1167 /*
1168 * It's possible to miss clearing Waiters here, when we woke
1169 * our page waiters, but the hashed waitqueue has waiters for
1170 * other pages on it.
1171 *
1172 * That's okay, it's a rare case. The next waker will clear it.
1173 */
1174 }
1175 spin_unlock_irqrestore(&q->lock, flags);
1176}
1177
1178static void wake_up_page(struct page *page, int bit)
1179{
1180 if (!PageWaiters(page))
1181 return;
1182 wake_up_page_bit(page, bit);
1183}
1184
1185/*
1186 * A choice of three behaviors for wait_on_page_bit_common():
1187 */
1188enum behavior {
1189 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1190 * __lock_page() waiting on then setting PG_locked.
1191 */
1192 SHARED, /* Hold ref to page and check the bit when woken, like
1193 * wait_on_page_writeback() waiting on PG_writeback.
1194 */
1195 DROP, /* Drop ref to page before wait, no check when woken,
1196 * like put_and_wait_on_page_locked() on PG_locked.
1197 */
1198};
1199
1200/*
1201 * Attempt to check (or get) the page bit, and mark us done
1202 * if successful.
1203 */
1204static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
1205 struct wait_queue_entry *wait)
1206{
1207 if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1208 if (test_and_set_bit(bit_nr, &page->flags))
1209 return false;
1210 } else if (test_bit(bit_nr, &page->flags))
1211 return false;
1212
1213 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1214 return true;
1215}
1216
1217/* How many times do we accept lock stealing from under a waiter? */
1218int sysctl_page_lock_unfairness = 5;
1219
1220static inline int wait_on_page_bit_common(wait_queue_head_t *q,
1221 struct page *page, int bit_nr, int state, enum behavior behavior)
1222{
1223 int unfairness = sysctl_page_lock_unfairness;
1224 struct wait_page_queue wait_page;
1225 wait_queue_entry_t *wait = &wait_page.wait;
1226 bool thrashing = false;
1227 bool delayacct = false;
1228 unsigned long pflags;
1229
1230 if (bit_nr == PG_locked &&
1231 !PageUptodate(page) && PageWorkingset(page)) {
1232 if (!PageSwapBacked(page)) {
1233 delayacct_thrashing_start();
1234 delayacct = true;
1235 }
1236 psi_memstall_enter(&pflags);
1237 thrashing = true;
1238 }
1239
1240 init_wait(wait);
1241 wait->func = wake_page_function;
1242 wait_page.page = page;
1243 wait_page.bit_nr = bit_nr;
1244
1245repeat:
1246 wait->flags = 0;
1247 if (behavior == EXCLUSIVE) {
1248 wait->flags = WQ_FLAG_EXCLUSIVE;
1249 if (--unfairness < 0)
1250 wait->flags |= WQ_FLAG_CUSTOM;
1251 }
1252
1253 /*
1254 * Do one last check whether we can get the
1255 * page bit synchronously.
1256 *
1257 * Do the SetPageWaiters() marking before that
1258 * to let any waker we _just_ missed know they
1259 * need to wake us up (otherwise they'll never
1260 * even go to the slow case that looks at the
1261 * page queue), and add ourselves to the wait
1262 * queue if we need to sleep.
1263 *
1264 * This part needs to be done under the queue
1265 * lock to avoid races.
1266 */
1267 spin_lock_irq(&q->lock);
1268 SetPageWaiters(page);
1269 if (!trylock_page_bit_common(page, bit_nr, wait))
1270 __add_wait_queue_entry_tail(q, wait);
1271 spin_unlock_irq(&q->lock);
1272
1273 /*
1274 * From now on, all the logic will be based on
1275 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1276 * see whether the page bit testing has already
1277 * been done by the wake function.
1278 *
1279 * We can drop our reference to the page.
1280 */
1281 if (behavior == DROP)
1282 put_page(page);
1283
1284 /*
1285 * Note that until the "finish_wait()", or until
1286 * we see the WQ_FLAG_WOKEN flag, we need to
1287 * be very careful with the 'wait->flags', because
1288 * we may race with a waker that sets them.
1289 */
1290 for (;;) {
1291 unsigned int flags;
1292
1293 set_current_state(state);
1294
1295 /* Loop until we've been woken or interrupted */
1296 flags = smp_load_acquire(&wait->flags);
1297 if (!(flags & WQ_FLAG_WOKEN)) {
1298 if (signal_pending_state(state, current))
1299 break;
1300
1301 io_schedule();
1302 continue;
1303 }
1304
1305 /* If we were non-exclusive, we're done */
1306 if (behavior != EXCLUSIVE)
1307 break;
1308
1309 /* If the waker got the lock for us, we're done */
1310 if (flags & WQ_FLAG_DONE)
1311 break;
1312
1313 /*
1314 * Otherwise, if we're getting the lock, we need to
1315 * try to get it ourselves.
1316 *
1317 * And if that fails, we'll have to retry this all.
1318 */
1319 if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
1320 goto repeat;
1321
1322 wait->flags |= WQ_FLAG_DONE;
1323 break;
1324 }
1325
1326 /*
1327 * If a signal happened, this 'finish_wait()' may remove the last
1328 * waiter from the wait-queues, but the PageWaiters bit will remain
1329 * set. That's ok. The next wakeup will take care of it, and trying
1330 * to do it here would be difficult and prone to races.
1331 */
1332 finish_wait(q, wait);
1333
1334 if (thrashing) {
1335 if (delayacct)
1336 delayacct_thrashing_end();
1337 psi_memstall_leave(&pflags);
1338 }
1339
1340 /*
1341 * NOTE! The wait->flags weren't stable until we've done the
1342 * 'finish_wait()', and we could have exited the loop above due
1343 * to a signal, and had a wakeup event happen after the signal
1344 * test but before the 'finish_wait()'.
1345 *
1346 * So only after the finish_wait() can we reliably determine
1347 * if we got woken up or not, so we can now figure out the final
1348 * return value based on that state without races.
1349 *
1350 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1351 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1352 */
1353 if (behavior == EXCLUSIVE)
1354 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1355
1356 return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1357}
1358
1359void wait_on_page_bit(struct page *page, int bit_nr)
1360{
1361 wait_queue_head_t *q = page_waitqueue(page);
1362 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1363}
1364EXPORT_SYMBOL(wait_on_page_bit);
1365
1366int wait_on_page_bit_killable(struct page *page, int bit_nr)
1367{
1368 wait_queue_head_t *q = page_waitqueue(page);
1369 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
1370}
1371EXPORT_SYMBOL(wait_on_page_bit_killable);
1372
1373/**
1374 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
1375 * @page: The page to wait for.
1376 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1377 *
1378 * The caller should hold a reference on @page. They expect the page to
1379 * become unlocked relatively soon, but do not wish to hold up migration
1380 * (for example) by holding the reference while waiting for the page to
1381 * come unlocked. After this function returns, the caller should not
1382 * dereference @page.
1383 *
1384 * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
1385 */
1386int put_and_wait_on_page_locked(struct page *page, int state)
1387{
1388 wait_queue_head_t *q;
1389
1390 page = compound_head(page);
1391 q = page_waitqueue(page);
1392 return wait_on_page_bit_common(q, page, PG_locked, state, DROP);
1393}
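/*
 * Illustrative sketch (not part of filemap.c): how a migration-style caller
 * uses the DROP behaviour.  It takes a reference purely in order to wait, and
 * must not touch the page again once the wait returns, because that reference
 * has been given up.
 *
 *	get_page(page);
 *	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 */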
1394
1395/**
1396 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1397 * @page: Page defining the wait queue of interest
1398 * @waiter: Waiter to add to the queue
1399 *
1400 * Add an arbitrary @waiter to the wait queue for the nominated @page.
1401 */
1402void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
1403{
1404 wait_queue_head_t *q = page_waitqueue(page);
1405 unsigned long flags;
1406
1407 spin_lock_irqsave(&q->lock, flags);
1408 __add_wait_queue_entry_tail(q, waiter);
1409 SetPageWaiters(page);
1410 spin_unlock_irqrestore(&q->lock, flags);
1411}
1412EXPORT_SYMBOL_GPL(add_page_wait_queue);
1413
1414#ifndef clear_bit_unlock_is_negative_byte
1415
1416/*
1417 * PG_waiters is the high bit in the same byte as PG_lock.
1418 *
1419 * On x86 (and on many other architectures), we can clear PG_lock and
1420 * test the sign bit at the same time. But if the architecture does
1421 * not support that special operation, we just do this all by hand
1422 * instead.
1423 *
1424 * The read of PG_waiters has to be after (or concurrently with) PG_locked
1425 * being cleared, but a memory barrier should be unnecessary since it is
1426 * in the same byte as PG_locked.
1427 */
1428static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1429{
1430 clear_bit_unlock(nr, mem);
1431 /* smp_mb__after_atomic(); */
1432 return test_bit(PG_waiters, mem);
1433}
1434
1435#endif
1436
1437/**
1438 * unlock_page - unlock a locked page
1439 * @page: the page
1440 *
1441 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
1442 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
1443 * mechanism between PageLocked pages and PageWriteback pages is shared.
1444 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
1445 *
1446 * Note that this depends on PG_waiters being the sign bit in the byte
1447 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
1448 * clear the PG_locked bit and test PG_waiters at the same time fairly
1449 * portably (architectures that do LL/SC can test any bit, while x86 can
1450 * test the sign bit).
1451 */
1452void unlock_page(struct page *page)
1453{
1454 BUILD_BUG_ON(PG_waiters != 7);
1455 page = compound_head(page);
1456 VM_BUG_ON_PAGE(!PageLocked(page), page);
1457 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
1458 wake_up_page_bit(page, PG_locked);
1459}
1460EXPORT_SYMBOL(unlock_page);
1461
1462/**
1463 * end_page_private_2 - Clear PG_private_2 and release any waiters
1464 * @page: The page
1465 *
1466 * Clear the PG_private_2 bit on a page and wake up any sleepers waiting for
1467 * this. The page ref held for PG_private_2 being set is released.
1468 *
1469 * This is, for example, used when a netfs page is being written to a local
1470 * disk cache, thereby allowing writes to the cache for the same page to be
1471 * serialised.
1472 */
1473void end_page_private_2(struct page *page)
1474{
1475 page = compound_head(page);
1476 VM_BUG_ON_PAGE(!PagePrivate2(page), page);
1477 clear_bit_unlock(PG_private_2, &page->flags);
1478 wake_up_page_bit(page, PG_private_2);
1479 put_page(page);
1480}
1481EXPORT_SYMBOL(end_page_private_2);
1482
1483/**
1484 * wait_on_page_private_2 - Wait for PG_private_2 to be cleared on a page
1485 * @page: The page to wait on
1486 *
1487 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a page.
1488 */
1489void wait_on_page_private_2(struct page *page)
1490{
1491 page = compound_head(page);
1492 while (PagePrivate2(page))
1493 wait_on_page_bit(page, PG_private_2);
1494}
1495EXPORT_SYMBOL(wait_on_page_private_2);
1496
1497/**
1498 * wait_on_page_private_2_killable - Wait for PG_private_2 to be cleared on a page
1499 * @page: The page to wait on
1500 *
1501 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a page or until a
1502 * fatal signal is received by the calling task.
1503 *
1504 * Return:
1505 * - 0 if successful.
1506 * - -EINTR if a fatal signal was encountered.
1507 */
1508int wait_on_page_private_2_killable(struct page *page)
1509{
1510 int ret = 0;
1511
1512 page = compound_head(page);
1513 while (PagePrivate2(page)) {
1514 ret = wait_on_page_bit_killable(page, PG_private_2);
1515 if (ret < 0)
1516 break;
1517 }
1518
1519 return ret;
1520}
1521EXPORT_SYMBOL(wait_on_page_private_2_killable);
1522
1523/**
1524 * end_page_writeback - end writeback against a page
1525 * @page: the page
1526 */
1527void end_page_writeback(struct page *page)
1528{
1529 /*
1530 * TestClearPageReclaim could be used here but it is an atomic
1531 * operation and overkill in this particular case. Failing to
1532 * shuffle a page marked for immediate reclaim is too mild to
1533 * justify taking an atomic operation penalty at the end of
1534 * ever page writeback.
1535 */
1536 if (PageReclaim(page)) {
1537 ClearPageReclaim(page);
1538 rotate_reclaimable_page(page);
1539 }
1540
1541 /*
1542 * Writeback does not hold a page reference of its own, relying
1543 * on truncation to wait for the clearing of PG_writeback.
1544 * But here we must make sure that the page is not freed and
1545 * reused before the wake_up_page().
1546 */
1547 get_page(page);
1548 if (!test_clear_page_writeback(page))
1549 BUG();
1550
1551 smp_mb__after_atomic();
1552 wake_up_page(page, PG_writeback);
1553 put_page(page);
1554}
1555EXPORT_SYMBOL(end_page_writeback);
1556
1557/*
1558 * After completing I/O on a page, call this routine to update the page
1559 * flags appropriately
1560 */
1561void page_endio(struct page *page, bool is_write, int err)
1562{
1563 if (!is_write) {
1564 if (!err) {
1565 SetPageUptodate(page);
1566 } else {
1567 ClearPageUptodate(page);
1568 SetPageError(page);
1569 }
1570 unlock_page(page);
1571 } else {
1572 if (err) {
1573 struct address_space *mapping;
1574
1575 SetPageError(page);
1576 mapping = page_mapping(page);
1577 if (mapping)
1578 mapping_set_error(mapping, err);
1579 }
1580 end_page_writeback(page);
1581 }
1582}
1583EXPORT_SYMBOL_GPL(page_endio);
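/*
 * Illustrative sketch (not part of filemap.c): a bio completion handler in
 * the style of mpage_end_io(), finishing every page in the bio via
 * page_endio().  The function name is hypothetical; the helpers used
 * (bio_for_each_segment_all(), blk_status_to_errno(), bio_put()) are the
 * standard block-layer API.
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		int err = blk_status_to_errno(bio->bi_status);
 *		struct bio_vec *bv;
 *		struct bvec_iter_all iter_all;
 *
 *		bio_for_each_segment_all(bv, bio, iter_all)
 *			page_endio(bv->bv_page, bio_op(bio) == REQ_OP_WRITE, err);
 *		bio_put(bio);
 *	}
 */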
1584
1585/**
1586 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1587 * @__page: the page to lock
1588 */
1589void __lock_page(struct page *__page)
1590{
1591 struct page *page = compound_head(__page);
1592 wait_queue_head_t *q = page_waitqueue(page);
1593 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
1594 EXCLUSIVE);
1595}
1596EXPORT_SYMBOL(__lock_page);
1597
1598int __lock_page_killable(struct page *__page)
1599{
1600 struct page *page = compound_head(__page);
1601 wait_queue_head_t *q = page_waitqueue(page);
1602 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
1603 EXCLUSIVE);
1604}
1605EXPORT_SYMBOL_GPL(__lock_page_killable);
1606
1607int __lock_page_async(struct page *page, struct wait_page_queue *wait)
1608{
1609 struct wait_queue_head *q = page_waitqueue(page);
1610 int ret = 0;
1611
1612 wait->page = page;
1613 wait->bit_nr = PG_locked;
1614
1615 spin_lock_irq(&q->lock);
1616 __add_wait_queue_entry_tail(q, &wait->wait);
1617 SetPageWaiters(page);
1618 ret = !trylock_page(page);
1619 /*
1620 * If we were successful now, we know we're still on the
1621 * waitqueue as we're still under the lock. This means it's
1622 * safe to remove and return success, we know the callback
1623 * isn't going to trigger.
1624 */
1625 if (!ret)
1626 __remove_wait_queue(q, &wait->wait);
1627 else
1628 ret = -EIOCBQUEUED;
1629 spin_unlock_irq(&q->lock);
1630 return ret;
1631}
1632
1633/*
1634 * Return values:
1635 * 1 - page is locked; mmap_lock is still held.
1636 * 0 - page is not locked.
1637 * mmap_lock has been released (mmap_read_unlock(), unless flags had both
1638 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1639 * which case mmap_lock is still held.
1640 *
1641 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
1642 * with the page locked and the mmap_lock unperturbed.
1643 */
1644int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
1645 unsigned int flags)
1646{
1647 if (fault_flag_allow_retry_first(flags)) {
1648 /*
1649 * CAUTION! In this case, mmap_lock is not released
1650 * even though return 0.
1651 */
1652 if (flags & FAULT_FLAG_RETRY_NOWAIT)
1653 return 0;
1654
1655 mmap_read_unlock(mm);
1656 if (flags & FAULT_FLAG_KILLABLE)
1657 wait_on_page_locked_killable(page);
1658 else
1659 wait_on_page_locked(page);
1660 return 0;
1661 }
1662 if (flags & FAULT_FLAG_KILLABLE) {
1663 int ret;
1664
1665 ret = __lock_page_killable(page);
1666 if (ret) {
1667 mmap_read_unlock(mm);
1668 return 0;
1669 }
1670 } else {
1671 __lock_page(page);
1672 }
1673 return 1;
1674
1675}
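/*
 * Illustrative sketch (not part of filemap.c): fault handlers normally reach
 * this through the lock_page_or_retry() wrapper and must honour a 0 return by
 * retrying the fault, since mmap_lock may already have been dropped.  The
 * labels and variables are hypothetical.
 *
 *	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
 *		ret = VM_FAULT_RETRY;
 *		goto out_release;
 *	}
 */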
1676
1677/**
1678 * page_cache_next_miss() - Find the next gap in the page cache.
1679 * @mapping: Mapping.
1680 * @index: Index.
1681 * @max_scan: Maximum range to search.
1682 *
1683 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1684 * gap with the lowest index.
1685 *
1686 * This function may be called under the rcu_read_lock. However, this will
1687 * not atomically search a snapshot of the cache at a single point in time.
1688 * For example, if a gap is created at index 5, then subsequently a gap is
1689 * created at index 10, page_cache_next_miss covering both indices may
1690 * return 10 if called under the rcu_read_lock.
1691 *
1692 * Return: The index of the gap if found, otherwise an index outside the
1693 * range specified (in which case 'return - index >= max_scan' will be true).
1694 * In the rare case of index wrap-around, 0 will be returned.
1695 */
1696pgoff_t page_cache_next_miss(struct address_space *mapping,
1697 pgoff_t index, unsigned long max_scan)
1698{
1699 XA_STATE(xas, &mapping->i_pages, index);
1700
1701 while (max_scan--) {
1702 void *entry = xas_next(&xas);
1703 if (!entry || xa_is_value(entry))
1704 break;
1705 if (xas.xa_index == 0)
1706 break;
1707 }
1708
1709 return xas.xa_index;
1710}
1711EXPORT_SYMBOL(page_cache_next_miss);
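/*
 * Illustrative sketch (not part of filemap.c): readahead-style use, probing
 * for the first hole after @index within a window of @max_pages.  If the
 * returned index lies outside that window there is no usable gap and the
 * caller simply gives up.  Names are hypothetical.
 *
 *	start = page_cache_next_miss(mapping, index + 1, max_pages);
 *	if (!start || start - index > max_pages)
 *		return;
 */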
1712
1713/**
1714 * page_cache_prev_miss() - Find the previous gap in the page cache.
1715 * @mapping: Mapping.
1716 * @index: Index.
1717 * @max_scan: Maximum range to search.
1718 *
1719 * Search the range [max(index - max_scan + 1, 0), index] for the
1720 * gap with the highest index.
1721 *
1722 * This function may be called under the rcu_read_lock. However, this will
1723 * not atomically search a snapshot of the cache at a single point in time.
1724 * For example, if a gap is created at index 10, then subsequently a gap is
1725 * created at index 5, page_cache_prev_miss() covering both indices may
1726 * return 5 if called under the rcu_read_lock.
1727 *
1728 * Return: The index of the gap if found, otherwise an index outside the
1729 * range specified (in which case 'index - return >= max_scan' will be true).
1730 * In the rare case of wrap-around, ULONG_MAX will be returned.
1731 */
1732pgoff_t page_cache_prev_miss(struct address_space *mapping,
1733 pgoff_t index, unsigned long max_scan)
1734{
1735 XA_STATE(xas, &mapping->i_pages, index);
1736
1737 while (max_scan--) {
1738 void *entry = xas_prev(&xas);
1739 if (!entry || xa_is_value(entry))
1740 break;
1741 if (xas.xa_index == ULONG_MAX)
1742 break;
1743 }
1744
1745 return xas.xa_index;
1746}
1747EXPORT_SYMBOL(page_cache_prev_miss);
1748
1749/*
1750 * mapping_get_entry - Get a page cache entry.
1751 * @mapping: the address_space to search
1752 * @index: The page cache index.
1753 *
1754 * Looks up the page cache slot at @mapping & @index. If there is a
1755 * page cache page, the head page is returned with an increased refcount.
1756 *
1757 * If the slot holds a shadow entry of a previously evicted page, or a
1758 * swap entry from shmem/tmpfs, it is returned.
1759 *
1760 * Return: The head page or shadow entry, %NULL if nothing is found.
1761 */
1762static struct page *mapping_get_entry(struct address_space *mapping,
1763 pgoff_t index)
1764{
1765 XA_STATE(xas, &mapping->i_pages, index);
1766 struct page *page;
1767
1768 rcu_read_lock();
1769repeat:
1770 xas_reset(&xas);
1771 page = xas_load(&xas);
1772 if (xas_retry(&xas, page))
1773 goto repeat;
1774 /*
1775 * A shadow entry of a recently evicted page, or a swap entry from
1776 * shmem/tmpfs. Return it without attempting to raise page count.
1777 */
1778 if (!page || xa_is_value(page))
1779 goto out;
1780
1781 if (!page_cache_get_speculative(page))
1782 goto repeat;
1783
1784 /*
1785 * Has the page moved or been split?
1786 * This is part of the lockless pagecache protocol. See
1787 * include/linux/pagemap.h for details.
1788 */
1789 if (unlikely(page != xas_reload(&xas))) {
1790 put_page(page);
1791 goto repeat;
1792 }
1793out:
1794 rcu_read_unlock();
1795
1796 return page;
1797}
1798
1799/**
1800 * pagecache_get_page - Find and get a reference to a page.
1801 * @mapping: The address_space to search.
1802 * @index: The page index.
1803 * @fgp_flags: %FGP flags modify how the page is returned.
1804 * @gfp_mask: Memory allocation flags to use if %FGP_CREAT is specified.
1805 *
1806 * Looks up the page cache entry at @mapping & @index.
1807 *
1808 * @fgp_flags can be zero or more of these flags:
1809 *
1810 * * %FGP_ACCESSED - The page will be marked accessed.
1811 * * %FGP_LOCK - The page is returned locked.
1812 * * %FGP_HEAD - If the page is present and a THP, return the head page
1813 * rather than the exact page specified by the index.
1814 * * %FGP_ENTRY - If there is a shadow / swap / DAX entry, return it
1815 * instead of allocating a new page to replace it.
1816 * * %FGP_CREAT - If no page is present then a new page is allocated using
1817 * @gfp_mask and added to the page cache and the VM's LRU list.
1818 * The page is returned locked and with an increased refcount.
1819 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1820 * page is already in cache. If the page was allocated, unlock it before
1821 * returning so the caller can do the same dance.
1822 * * %FGP_WRITE - The page will be written to.
1823 * * %FGP_NOFS - __GFP_FS will get cleared in the gfp mask.
1824 * * %FGP_NOWAIT - Don't block on the page lock.
1825 *
1826 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1827 * if the %GFP flags specified for %FGP_CREAT are atomic.
1828 *
1829 * If there is a page cache page, it is returned with an increased refcount.
1830 *
1831 * Return: The found page or %NULL otherwise.
1832 */
1833struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
1834 int fgp_flags, gfp_t gfp_mask)
1835{
1836 struct page *page;
1837
1838repeat:
1839 page = mapping_get_entry(mapping, index);
1840 if (xa_is_value(page)) {
1841 if (fgp_flags & FGP_ENTRY)
1842 return page;
1843 page = NULL;
1844 }
1845 if (!page)
1846 goto no_page;
1847
1848 if (fgp_flags & FGP_LOCK) {
1849 if (fgp_flags & FGP_NOWAIT) {
1850 if (!trylock_page(page)) {
1851 put_page(page);
1852 return NULL;
1853 }
1854 } else {
1855 lock_page(page);
1856 }
1857
1858 /* Has the page been truncated? */
1859 if (unlikely(page->mapping != mapping)) {
1860 unlock_page(page);
1861 put_page(page);
1862 goto repeat;
1863 }
1864 VM_BUG_ON_PAGE(!thp_contains(page, index), page);
1865 }
1866
1867 if (fgp_flags & FGP_ACCESSED)
1868 mark_page_accessed(page);
1869 else if (fgp_flags & FGP_WRITE) {
1870 /* Clear idle flag for buffer write */
1871 if (page_is_idle(page))
1872 clear_page_idle(page);
1873 }
1874 if (!(fgp_flags & FGP_HEAD))
1875 page = find_subpage(page, index);
1876
1877no_page:
1878 if (!page && (fgp_flags & FGP_CREAT)) {
1879 int err;
1880 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1881 gfp_mask |= __GFP_WRITE;
1882 if (fgp_flags & FGP_NOFS)
1883 gfp_mask &= ~__GFP_FS;
1884
1885 page = __page_cache_alloc(gfp_mask);
1886 if (!page)
1887 return NULL;
1888
1889 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1890 fgp_flags |= FGP_LOCK;
1891
1892		/* Init accessed so we can avoid an atomic mark_page_accessed() later */
1893 if (fgp_flags & FGP_ACCESSED)
1894 __SetPageReferenced(page);
1895
1896 err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
1897 if (unlikely(err)) {
1898 put_page(page);
1899 page = NULL;
1900 if (err == -EEXIST)
1901 goto repeat;
1902 }
1903
1904 /*
1905 * add_to_page_cache_lru locks the page, and for mmap we expect
1906 * an unlocked page.
1907 */
1908 if (page && (fgp_flags & FGP_FOR_MMAP))
1909 unlock_page(page);
1910 }
1911
1912 return page;
1913}
1914EXPORT_SYMBOL(pagecache_get_page);
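
/*
 * Example usage (an illustrative sketch): the common "find or create a
 * locked page" pattern that the find_or_create_page() wrapper expresses.
 * @mapping and @index are assumed to come from the caller.
 *
 *	struct page *page;
 *
 *	page = pagecache_get_page(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	...use the locked, referenced page...
 *	unlock_page(page);
 *	put_page(page);
 */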
1915
1916static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
1917 xa_mark_t mark)
1918{
1919 struct page *page;
1920
1921retry:
1922 if (mark == XA_PRESENT)
1923 page = xas_find(xas, max);
1924 else
1925 page = xas_find_marked(xas, max, mark);
1926
1927 if (xas_retry(xas, page))
1928 goto retry;
1929 /*
1930 * A shadow entry of a recently evicted page, a swap
1931 * entry from shmem/tmpfs or a DAX entry. Return it
1932 * without attempting to raise page count.
1933 */
1934 if (!page || xa_is_value(page))
1935 return page;
1936
1937 if (!page_cache_get_speculative(page))
1938 goto reset;
1939
1940 /* Has the page moved or been split? */
1941 if (unlikely(page != xas_reload(xas))) {
1942 put_page(page);
1943 goto reset;
1944 }
1945
1946 return page;
1947reset:
1948 xas_reset(xas);
1949 goto retry;
1950}
1951
1952/**
1953 * find_get_entries - gang pagecache lookup
1954 * @mapping: The address_space to search
1955 * @start: The starting page cache index
1956 * @end: The final page index (inclusive).
1957 * @pvec: Where the resulting entries are placed.
1958 * @indices:	The cache indices corresponding to the entries in @pvec
1959 *
1960 * find_get_entries() will search for and return a batch of entries in
1961 * the mapping. The entries are placed in @pvec. find_get_entries()
1962 * takes a reference on any actual pages it returns.
1963 *
1964 * The search returns a group of mapping-contiguous page cache entries
1965 * with ascending indexes. There may be holes in the indices due to
1966 * not-present pages.
1967 *
1968 * Any shadow entries of evicted pages, or swap entries from
1969 * shmem/tmpfs, are included in the returned array.
1970 *
1971 * If it finds a Transparent Huge Page, head or tail, find_get_entries()
1972 * stops at that page: the caller is likely to have a better way to handle
1973 * the compound page as a whole, and then skip its extent, than repeatedly
1974 * calling find_get_entries() to return all its tails.
1975 *
1976 * Return: the number of pages and shadow entries which were found.
1977 */
1978unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
1979 pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
1980{
1981 XA_STATE(xas, &mapping->i_pages, start);
1982 struct page *page;
1983 unsigned int ret = 0;
1984 unsigned nr_entries = PAGEVEC_SIZE;
1985
1986 rcu_read_lock();
1987 while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
1988 /*
1989 * Terminate early on finding a THP, to allow the caller to
1990 * handle it all at once; but continue if this is hugetlbfs.
1991 */
1992 if (!xa_is_value(page) && PageTransHuge(page) &&
1993 !PageHuge(page)) {
1994 page = find_subpage(page, xas.xa_index);
1995 nr_entries = ret + 1;
1996 }
1997
1998 indices[ret] = xas.xa_index;
1999 pvec->pages[ret] = page;
2000 if (++ret == nr_entries)
2001 break;
2002 }
2003 rcu_read_unlock();
2004
2005 pvec->nr = ret;
2006 return ret;
2007}
2008
2009/**
2010 * find_lock_entries - Find a batch of pagecache entries.
2011 * @mapping: The address_space to search.
2012 * @start: The starting page cache index.
2013 * @end: The final page index (inclusive).
2014 * @pvec: Where the resulting entries are placed.
2015 * @indices: The cache indices of the entries in @pvec.
2016 *
2017 * find_lock_entries() will return a batch of entries from @mapping.
2018 * Swap, shadow and DAX entries are included. Pages are returned
2019 * locked and with an incremented refcount. Pages which are locked by
2020 * somebody else or under writeback are skipped. Only the head page of
2021 * a THP is returned. Pages which are partially outside the range are
2022 * not returned.
2023 *
2024 * The entries have ascending indexes. The indices may not be consecutive
2025 * due to not-present entries, THP pages, pages which could not be locked
2026 * or pages under writeback.
2027 *
2028 * Return: The number of entries which were found.
2029 */
2030unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
2031 pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
2032{
2033 XA_STATE(xas, &mapping->i_pages, start);
2034 struct page *page;
2035
2036 rcu_read_lock();
2037 while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
2038 if (!xa_is_value(page)) {
2039 if (page->index < start)
2040 goto put;
2041 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2042 if (page->index + thp_nr_pages(page) - 1 > end)
2043 goto put;
2044 if (!trylock_page(page))
2045 goto put;
2046 if (page->mapping != mapping || PageWriteback(page))
2047 goto unlock;
2048 VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index),
2049 page);
2050 }
2051 indices[pvec->nr] = xas.xa_index;
2052 if (!pagevec_add(pvec, page))
2053 break;
2054 goto next;
2055unlock:
2056 unlock_page(page);
2057put:
2058 put_page(page);
2059next:
2060 if (!xa_is_value(page) && PageTransHuge(page)) {
2061 unsigned int nr_pages = thp_nr_pages(page);
2062
2063 /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
2064 xas_set(&xas, page->index + nr_pages);
2065 if (xas.xa_index < nr_pages)
2066 break;
2067 }
2068 }
2069 rcu_read_unlock();
2070
2071 return pagevec_count(pvec);
2072}
2073
2074/**
2075 * find_get_pages_range - gang pagecache lookup
2076 * @mapping: The address_space to search
2077 * @start: The starting page index
2078 * @end: The final page index (inclusive)
2079 * @nr_pages: The maximum number of pages
2080 * @pages: Where the resulting pages are placed
2081 *
2082 * find_get_pages_range() will search for and return a group of up to @nr_pages
2083 * pages in the mapping starting at index @start and up to index @end
2084 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
2085 * a reference against the returned pages.
2086 *
2087 * The search returns a group of mapping-contiguous pages with ascending
2088 * indexes. There may be holes in the indices due to not-present pages.
2089 * We also update @start to index the next page for the traversal.
2090 *
2091 * Return: the number of pages which were found. If this number is
2092 * smaller than @nr_pages, the end of specified range has been
2093 * reached.
2094 */
2095unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
2096 pgoff_t end, unsigned int nr_pages,
2097 struct page **pages)
2098{
2099 XA_STATE(xas, &mapping->i_pages, *start);
2100 struct page *page;
2101 unsigned ret = 0;
2102
2103 if (unlikely(!nr_pages))
2104 return 0;
2105
2106 rcu_read_lock();
2107 while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
2108 /* Skip over shadow, swap and DAX entries */
2109 if (xa_is_value(page))
2110 continue;
2111
2112 pages[ret] = find_subpage(page, xas.xa_index);
2113 if (++ret == nr_pages) {
2114 *start = xas.xa_index + 1;
2115 goto out;
2116 }
2117 }
2118
2119 /*
2120 * We come here when there is no page beyond @end. We take care to not
2121 * overflow the index @start as it confuses some of the callers. This
2122 * breaks the iteration when there is a page at index -1 but that is
2123 * already broken anyway.
2124 */
2125 if (end == (pgoff_t)-1)
2126 *start = (pgoff_t)-1;
2127 else
2128 *start = end + 1;
2129out:
2130 rcu_read_unlock();
2131
2132 return ret;
2133}
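
/*
 * Example usage (an illustrative sketch): walking every cached page in a
 * range in batches.  "start", "end" and the handling of each page are
 * assumed to be supplied by the caller.
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	pgoff_t index = start;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_range(mapping, &index, end,
 *					  PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			...inspect pages[i]...
 *			put_page(pages[i]);
 *		}
 *	}
 */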
2134
2135/**
2136 * find_get_pages_contig - gang contiguous pagecache lookup
2137 * @mapping: The address_space to search
2138 * @index: The starting page index
2139 * @nr_pages: The maximum number of pages
2140 * @pages: Where the resulting pages are placed
2141 *
2142 * find_get_pages_contig() works exactly like find_get_pages(), except
2143 * that the returned pages are guaranteed to be contiguous.
2144 *
2145 * Return: the number of pages which were found.
2146 */
2147unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
2148 unsigned int nr_pages, struct page **pages)
2149{
2150 XA_STATE(xas, &mapping->i_pages, index);
2151 struct page *page;
2152 unsigned int ret = 0;
2153
2154 if (unlikely(!nr_pages))
2155 return 0;
2156
2157 rcu_read_lock();
2158 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
2159 if (xas_retry(&xas, page))
2160 continue;
2161 /*
2162 * If the entry has been swapped out, we can stop looking.
2163 * No current caller is looking for DAX entries.
2164 */
2165 if (xa_is_value(page))
2166 break;
2167
2168 if (!page_cache_get_speculative(page))
2169 goto retry;
2170
2171 /* Has the page moved or been split? */
2172 if (unlikely(page != xas_reload(&xas)))
2173 goto put_page;
2174
2175 pages[ret] = find_subpage(page, xas.xa_index);
2176 if (++ret == nr_pages)
2177 break;
2178 continue;
2179put_page:
2180 put_page(page);
2181retry:
2182 xas_reset(&xas);
2183 }
2184 rcu_read_unlock();
2185 return ret;
2186}
2187EXPORT_SYMBOL(find_get_pages_contig);
2188
2189/**
2190 * find_get_pages_range_tag - Find and return head pages matching @tag.
2191 * @mapping: the address_space to search
2192 * @index: the starting page index
2193 * @end: The final page index (inclusive)
2194 * @tag: the tag index
2195 * @nr_pages: the maximum number of pages
2196 * @pages: where the resulting pages are placed
2197 *
2198 * Like find_get_pages(), except we only return head pages which are tagged
2199 * with @tag. @index is updated to the index immediately after the last
2200 * page we return, ready for the next iteration.
2201 *
2202 * Return: the number of pages which were found.
2203 */
2204unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
2205 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
2206 struct page **pages)
2207{
2208 XA_STATE(xas, &mapping->i_pages, *index);
2209 struct page *page;
2210 unsigned ret = 0;
2211
2212 if (unlikely(!nr_pages))
2213 return 0;
2214
2215 rcu_read_lock();
2216 while ((page = find_get_entry(&xas, end, tag))) {
2217 /*
2218 * Shadow entries should never be tagged, but this iteration
2219 * is lockless so there is a window for page reclaim to evict
2220 * a page we saw tagged. Skip over it.
2221 */
2222 if (xa_is_value(page))
2223 continue;
2224
2225 pages[ret] = page;
2226 if (++ret == nr_pages) {
2227 *index = page->index + thp_nr_pages(page);
2228 goto out;
2229 }
2230 }
2231
2232 /*
2233 * We come here when we got to @end. We take care to not overflow the
2234 * index @index as it confuses some of the callers. This breaks the
2235 * iteration when there is a page at index -1 but that is already
2236 * broken anyway.
2237 */
2238 if (end == (pgoff_t)-1)
2239 *index = (pgoff_t)-1;
2240 else
2241 *index = end + 1;
2242out:
2243 rcu_read_unlock();
2244
2245 return ret;
2246}
2247EXPORT_SYMBOL(find_get_pages_range_tag);
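
/*
 * Example usage (an illustrative sketch): the shape of a writeback-style
 * loop over dirty pages, as the pagevec_lookup_range_tag() wrapper and
 * write_cache_pages() use it.  Error handling is omitted.
 *
 *	pgoff_t index = start;
 *	unsigned int i, nr;
 *
 *	while (index <= end &&
 *	       (nr = find_get_pages_range_tag(mapping, &index, end,
 *			PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			lock_page(pages[i]);
 *			...write the page back...
 *			unlock_page(pages[i]);
 *			put_page(pages[i]);
 *		}
 *	}
 */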
2248
2249/*
2250 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2251 * a _large_ part of the i/o request. Imagine the worst scenario:
2252 *
2253 * ---R__________________________________________B__________
2254 * ^ reading here ^ bad block(assume 4k)
2255 *
2256 * read(R) => miss => readahead(R...B) => media error => frustrating retries
2257 * => failing the whole request => read(R) => read(R+1) =>
2258 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2259 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2260 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2261 *
2262 * It is going insane. Fix it by quickly scaling down the readahead size.
2263 */
2264static void shrink_readahead_size_eio(struct file_ra_state *ra)
2265{
2266 ra->ra_pages /= 4;
2267}
2268
2269/*
2270 * filemap_get_read_batch - Get a batch of pages for read
2271 *
2272 * Get a batch of pages which represent a contiguous range of bytes
2273 * in the file. No tail pages will be returned. If @index is in the
2274 * middle of a THP, the entire THP will be returned. The last page in
2275 * the batch may have the Readahead flag set or may not be Uptodate, so
2276 * the caller can take the appropriate action.
2277 */
2278static void filemap_get_read_batch(struct address_space *mapping,
2279 pgoff_t index, pgoff_t max, struct pagevec *pvec)
2280{
2281 XA_STATE(xas, &mapping->i_pages, index);
2282 struct page *head;
2283
2284 rcu_read_lock();
2285 for (head = xas_load(&xas); head; head = xas_next(&xas)) {
2286 if (xas_retry(&xas, head))
2287 continue;
2288 if (xas.xa_index > max || xa_is_value(head))
2289 break;
2290 if (!page_cache_get_speculative(head))
2291 goto retry;
2292
2293 /* Has the page moved or been split? */
2294 if (unlikely(head != xas_reload(&xas)))
2295 goto put_page;
2296
2297 if (!pagevec_add(pvec, head))
2298 break;
2299 if (!PageUptodate(head))
2300 break;
2301 if (PageReadahead(head))
2302 break;
2303 xas.xa_index = head->index + thp_nr_pages(head) - 1;
2304 xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;
2305 continue;
2306put_page:
2307 put_page(head);
2308retry:
2309 xas_reset(&xas);
2310 }
2311 rcu_read_unlock();
2312}
2313
2314static int filemap_read_page(struct file *file, struct address_space *mapping,
2315 struct page *page)
2316{
2317 int error;
2318
2319 /*
2320 * A previous I/O error may have been due to temporary failures,
2321	 * e.g. multipath errors. PG_error will be set again if readpage
2322 * fails.
2323 */
2324 ClearPageError(page);
2325 /* Start the actual read. The read will unlock the page. */
2326 error = mapping->a_ops->readpage(file, page);
2327 if (error)
2328 return error;
2329
2330 error = wait_on_page_locked_killable(page);
2331 if (error)
2332 return error;
2333 if (PageUptodate(page))
2334 return 0;
2335 shrink_readahead_size_eio(&file->f_ra);
2336 return -EIO;
2337}
2338
2339static bool filemap_range_uptodate(struct address_space *mapping,
2340 loff_t pos, struct iov_iter *iter, struct page *page)
2341{
2342 int count;
2343
2344 if (PageUptodate(page))
2345 return true;
2346 /* pipes can't handle partially uptodate pages */
2347 if (iov_iter_is_pipe(iter))
2348 return false;
2349 if (!mapping->a_ops->is_partially_uptodate)
2350 return false;
2351 if (mapping->host->i_blkbits >= (PAGE_SHIFT + thp_order(page)))
2352 return false;
2353
2354 count = iter->count;
2355 if (page_offset(page) > pos) {
2356 count -= page_offset(page) - pos;
2357 pos = 0;
2358 } else {
2359 pos -= page_offset(page);
2360 }
2361
2362 return mapping->a_ops->is_partially_uptodate(page, pos, count);
2363}
2364
2365static int filemap_update_page(struct kiocb *iocb,
2366 struct address_space *mapping, struct iov_iter *iter,
2367 struct page *page)
2368{
2369 int error;
2370
2371 if (!trylock_page(page)) {
2372 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2373 return -EAGAIN;
2374 if (!(iocb->ki_flags & IOCB_WAITQ)) {
2375 put_and_wait_on_page_locked(page, TASK_KILLABLE);
2376 return AOP_TRUNCATED_PAGE;
2377 }
2378 error = __lock_page_async(page, iocb->ki_waitq);
2379 if (error)
2380 return error;
2381 }
2382
2383 if (!page->mapping)
2384 goto truncated;
2385
2386 error = 0;
2387 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page))
2388 goto unlock;
2389
2390 error = -EAGAIN;
2391 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2392 goto unlock;
2393
2394 error = filemap_read_page(iocb->ki_filp, mapping, page);
2395 if (error == AOP_TRUNCATED_PAGE)
2396 put_page(page);
2397 return error;
2398truncated:
2399 unlock_page(page);
2400 put_page(page);
2401 return AOP_TRUNCATED_PAGE;
2402unlock:
2403 unlock_page(page);
2404 return error;
2405}
2406
2407static int filemap_create_page(struct file *file,
2408 struct address_space *mapping, pgoff_t index,
2409 struct pagevec *pvec)
2410{
2411 struct page *page;
2412 int error;
2413
2414 page = page_cache_alloc(mapping);
2415 if (!page)
2416 return -ENOMEM;
2417
2418 error = add_to_page_cache_lru(page, mapping, index,
2419 mapping_gfp_constraint(mapping, GFP_KERNEL));
2420 if (error == -EEXIST)
2421 error = AOP_TRUNCATED_PAGE;
2422 if (error)
2423 goto error;
2424
2425 error = filemap_read_page(file, mapping, page);
2426 if (error)
2427 goto error;
2428
2429 pagevec_add(pvec, page);
2430 return 0;
2431error:
2432 put_page(page);
2433 return error;
2434}
2435
2436static int filemap_readahead(struct kiocb *iocb, struct file *file,
2437 struct address_space *mapping, struct page *page,
2438 pgoff_t last_index)
2439{
2440 if (iocb->ki_flags & IOCB_NOIO)
2441 return -EAGAIN;
2442 page_cache_async_readahead(mapping, &file->f_ra, file, page,
2443 page->index, last_index - page->index);
2444 return 0;
2445}
2446
2447static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
2448 struct pagevec *pvec)
2449{
2450 struct file *filp = iocb->ki_filp;
2451 struct address_space *mapping = filp->f_mapping;
2452 struct file_ra_state *ra = &filp->f_ra;
2453 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2454 pgoff_t last_index;
2455 struct page *page;
2456 int err = 0;
2457
2458 last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
2459retry:
2460 if (fatal_signal_pending(current))
2461 return -EINTR;
2462
2463 filemap_get_read_batch(mapping, index, last_index, pvec);
2464 if (!pagevec_count(pvec)) {
2465 if (iocb->ki_flags & IOCB_NOIO)
2466 return -EAGAIN;
2467 page_cache_sync_readahead(mapping, ra, filp, index,
2468 last_index - index);
2469 filemap_get_read_batch(mapping, index, last_index, pvec);
2470 }
2471 if (!pagevec_count(pvec)) {
2472 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2473 return -EAGAIN;
2474 err = filemap_create_page(filp, mapping,
2475 iocb->ki_pos >> PAGE_SHIFT, pvec);
2476 if (err == AOP_TRUNCATED_PAGE)
2477 goto retry;
2478 return err;
2479 }
2480
2481 page = pvec->pages[pagevec_count(pvec) - 1];
2482 if (PageReadahead(page)) {
2483 err = filemap_readahead(iocb, filp, mapping, page, last_index);
2484 if (err)
2485 goto err;
2486 }
2487 if (!PageUptodate(page)) {
2488 if ((iocb->ki_flags & IOCB_WAITQ) && pagevec_count(pvec) > 1)
2489 iocb->ki_flags |= IOCB_NOWAIT;
2490 err = filemap_update_page(iocb, mapping, iter, page);
2491 if (err)
2492 goto err;
2493 }
2494
2495 return 0;
2496err:
2497 if (err < 0)
2498 put_page(page);
2499 if (likely(--pvec->nr))
2500 return 0;
2501 if (err == AOP_TRUNCATED_PAGE)
2502 goto retry;
2503 return err;
2504}
2505
2506/**
2507 * filemap_read - Read data from the page cache.
2508 * @iocb: The iocb to read.
2509 * @iter: Destination for the data.
2510 * @already_read: Number of bytes already read by the caller.
2511 *
2512 * Copies data from the page cache. If the data is not currently present,
2513 * uses the readahead and readpage address_space operations to fetch it.
2514 *
2515 * Return: Total number of bytes copied, including those already read by
2516 * the caller. If an error happens before any bytes are copied, returns
2517 * a negative error number.
2518 */
2519ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2520 ssize_t already_read)
2521{
2522 struct file *filp = iocb->ki_filp;
2523 struct file_ra_state *ra = &filp->f_ra;
2524 struct address_space *mapping = filp->f_mapping;
2525 struct inode *inode = mapping->host;
2526 struct pagevec pvec;
2527 int i, error = 0;
2528 bool writably_mapped;
2529 loff_t isize, end_offset;
2530
2531 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2532 return 0;
2533 if (unlikely(!iov_iter_count(iter)))
2534 return 0;
2535
2536 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2537 pagevec_init(&pvec);
2538
2539 do {
2540 cond_resched();
2541
2542 /*
2543 * If we've already successfully copied some data, then we
2544 * can no longer safely return -EIOCBQUEUED. Hence mark
2545 * an async read NOWAIT at that point.
2546 */
2547 if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2548 iocb->ki_flags |= IOCB_NOWAIT;
2549
2550 error = filemap_get_pages(iocb, iter, &pvec);
2551 if (error < 0)
2552 break;
2553
2554 /*
2555 * i_size must be checked after we know the pages are Uptodate.
2556 *
2557	 * Checking i_size after the Uptodate check allows us to calculate
2558	 * the correct number of bytes to copy, which means the zero-filled
2559 * part of the page is not copied back to userspace (unless
2560 * another truncate extends the file - this is desired though).
2561 */
2562 isize = i_size_read(inode);
2563 if (unlikely(iocb->ki_pos >= isize))
2564 goto put_pages;
2565 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2566
2567 /*
2568 * Once we start copying data, we don't want to be touching any
2569 * cachelines that might be contended:
2570 */
2571 writably_mapped = mapping_writably_mapped(mapping);
2572
2573 /*
2574 * When a sequential read accesses a page several times, only
2575 * mark it as accessed the first time.
2576 */
2577 if (iocb->ki_pos >> PAGE_SHIFT !=
2578 ra->prev_pos >> PAGE_SHIFT)
2579 mark_page_accessed(pvec.pages[0]);
2580
2581 for (i = 0; i < pagevec_count(&pvec); i++) {
2582 struct page *page = pvec.pages[i];
2583 size_t page_size = thp_size(page);
2584 size_t offset = iocb->ki_pos & (page_size - 1);
2585 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2586 page_size - offset);
2587 size_t copied;
2588
2589 if (end_offset < page_offset(page))
2590 break;
2591 if (i > 0)
2592 mark_page_accessed(page);
2593 /*
2594 * If users can be writing to this page using arbitrary
2595 * virtual addresses, take care about potential aliasing
2596 * before reading the page on the kernel side.
2597 */
2598 if (writably_mapped) {
2599 int j;
2600
2601 for (j = 0; j < thp_nr_pages(page); j++)
2602 flush_dcache_page(page + j);
2603 }
2604
2605 copied = copy_page_to_iter(page, offset, bytes, iter);
2606
2607 already_read += copied;
2608 iocb->ki_pos += copied;
2609 ra->prev_pos = iocb->ki_pos;
2610
2611 if (copied < bytes) {
2612 error = -EFAULT;
2613 break;
2614 }
2615 }
2616put_pages:
2617 for (i = 0; i < pagevec_count(&pvec); i++)
2618 put_page(pvec.pages[i]);
2619 pagevec_reinit(&pvec);
2620 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2621
2622 file_accessed(filp);
2623
2624 return already_read ? already_read : error;
2625}
2626EXPORT_SYMBOL_GPL(filemap_read);
2627
2628/**
2629 * generic_file_read_iter - generic filesystem read routine
2630 * @iocb: kernel I/O control block
2631 * @iter: destination for the data read
2632 *
2633 * This is the "read_iter()" routine for all filesystems
2634 * that can use the page cache directly.
2635 *
2636 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2637 * be returned when no data can be read without waiting for I/O requests
2638 * to complete; it doesn't prevent readahead.
2639 *
2640 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2641 * requests shall be made for the read or for readahead. When no data
2642 * can be read, -EAGAIN shall be returned. When readahead would be
2643 * triggered, a partial, possibly empty read shall be returned.
2644 *
2645 * Return:
2646 * * number of bytes copied, even for partial reads
2647 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2648 */
2649ssize_t
2650generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2651{
2652 size_t count = iov_iter_count(iter);
2653 ssize_t retval = 0;
2654
2655 if (!count)
2656 return 0; /* skip atime */
2657
2658 if (iocb->ki_flags & IOCB_DIRECT) {
2659 struct file *file = iocb->ki_filp;
2660 struct address_space *mapping = file->f_mapping;
2661 struct inode *inode = mapping->host;
2662 loff_t size;
2663
2664 size = i_size_read(inode);
2665 if (iocb->ki_flags & IOCB_NOWAIT) {
2666 if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
2667 iocb->ki_pos + count - 1))
2668 return -EAGAIN;
2669 } else {
2670 retval = filemap_write_and_wait_range(mapping,
2671 iocb->ki_pos,
2672 iocb->ki_pos + count - 1);
2673 if (retval < 0)
2674 return retval;
2675 }
2676
2677 file_accessed(file);
2678
2679 retval = mapping->a_ops->direct_IO(iocb, iter);
2680 if (retval >= 0) {
2681 iocb->ki_pos += retval;
2682 count -= retval;
2683 }
2684 if (retval != -EIOCBQUEUED)
2685 iov_iter_revert(iter, count - iov_iter_count(iter));
2686
2687 /*
2688 * Btrfs can have a short DIO read if we encounter
2689 * compressed extents, so if there was an error, or if
2690 * we've already read everything we wanted to, or if
2691 * there was a short read because we hit EOF, go ahead
2692 * and return. Otherwise fallthrough to buffered io for
2693 * the rest of the read. Buffered reads will not work for
2694 * DAX files, so don't bother trying.
2695 */
2696 if (retval < 0 || !count || iocb->ki_pos >= size ||
2697 IS_DAX(inode))
2698 return retval;
2699 }
2700
2701 return filemap_read(iocb, iter, retval);
2702}
2703EXPORT_SYMBOL(generic_file_read_iter);
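
/*
 * Example (an illustrative sketch): a filesystem that uses the page cache
 * directly typically just wires this up in its file_operations, e.g.:
 *
 *	const struct file_operations example_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 *
 * "example_file_operations" is a made-up name; see ext2 or ramfs for real
 * users of this pattern.
 */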
2704
2705static inline loff_t page_seek_hole_data(struct xa_state *xas,
2706 struct address_space *mapping, struct page *page,
2707 loff_t start, loff_t end, bool seek_data)
2708{
2709 const struct address_space_operations *ops = mapping->a_ops;
2710 size_t offset, bsz = i_blocksize(mapping->host);
2711
2712 if (xa_is_value(page) || PageUptodate(page))
2713 return seek_data ? start : end;
2714 if (!ops->is_partially_uptodate)
2715 return seek_data ? end : start;
2716
2717 xas_pause(xas);
2718 rcu_read_unlock();
2719 lock_page(page);
2720 if (unlikely(page->mapping != mapping))
2721 goto unlock;
2722
2723 offset = offset_in_thp(page, start) & ~(bsz - 1);
2724
2725 do {
2726 if (ops->is_partially_uptodate(page, offset, bsz) == seek_data)
2727 break;
2728 start = (start + bsz) & ~(bsz - 1);
2729 offset += bsz;
2730 } while (offset < thp_size(page));
2731unlock:
2732 unlock_page(page);
2733 rcu_read_lock();
2734 return start;
2735}
2736
2737static inline
2738unsigned int seek_page_size(struct xa_state *xas, struct page *page)
2739{
2740 if (xa_is_value(page))
2741 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
2742 return thp_size(page);
2743}
2744
2745/**
2746 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
2747 * @mapping: Address space to search.
2748 * @start: First byte to consider.
2749 * @end: Limit of search (exclusive).
2750 * @whence: Either SEEK_HOLE or SEEK_DATA.
2751 *
2752 * If the page cache knows which blocks contain holes and which blocks
2753 * contain data, your filesystem can use this function to implement
2754 * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are
2755 * entirely memory-based such as tmpfs, and filesystems which support
2756 * unwritten extents.
2757 *
2758 * Return: The requested offset on success, or -ENXIO if @whence specifies
2759 * SEEK_DATA and there is no data after @start. There is an implicit hole
2760 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
2761 * and @end contain data.
2762 */
2763loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
2764 loff_t end, int whence)
2765{
2766 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
2767 pgoff_t max = (end - 1) >> PAGE_SHIFT;
2768 bool seek_data = (whence == SEEK_DATA);
2769 struct page *page;
2770
2771 if (end <= start)
2772 return -ENXIO;
2773
2774 rcu_read_lock();
2775 while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
2776 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
2777 unsigned int seek_size;
2778
2779 if (start < pos) {
2780 if (!seek_data)
2781 goto unlock;
2782 start = pos;
2783 }
2784
2785 seek_size = seek_page_size(&xas, page);
2786 pos = round_up(pos + 1, seek_size);
2787 start = page_seek_hole_data(&xas, mapping, page, start, pos,
2788 seek_data);
2789 if (start < pos)
2790 goto unlock;
2791 if (start >= end)
2792 break;
2793 if (seek_size > PAGE_SIZE)
2794 xas_set(&xas, pos >> PAGE_SHIFT);
2795 if (!xa_is_value(page))
2796 put_page(page);
2797 }
2798 if (seek_data)
2799 start = -ENXIO;
2800unlock:
2801 rcu_read_unlock();
2802 if (page && !xa_is_value(page))
2803 put_page(page);
2804 if (start > end)
2805 return end;
2806 return start;
2807}
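
/*
 * Example (an illustrative sketch): how a memory-backed filesystem might
 * use this from its ->llseek(), roughly the way shmem does.  The function
 * name is hypothetical and error handling is simplified.
 *
 *	static loff_t example_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		if (whence != SEEK_DATA && whence != SEEK_HOLE)
 *			return generic_file_llseek(file, offset, whence);
 *
 *		inode_lock(inode);
 *		offset = mapping_seek_hole_data(file->f_mapping, offset,
 *						i_size_read(inode), whence);
 *		inode_unlock(inode);
 *		if (offset >= 0)
 *			offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 *		return offset;
 *	}
 */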
2808
2809#ifdef CONFIG_MMU
2810#define MMAP_LOTSAMISS (100)
2811/*
2812 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
2813 * @vmf - the vm_fault for this fault.
2814 * @page - the page to lock.
2815 * @fpin - the pointer to the file we may pin (or is already pinned).
2816 *
2817 * This works similarly to lock_page_or_retry in that it can drop the mmap_lock.
2818 * It differs in that it actually returns the page locked if it returns 1, and 0
2819 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin
2820 * will point to the pinned file and needs to be fput()'ed at a later point.
2821 */
2822static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
2823 struct file **fpin)
2824{
2825 if (trylock_page(page))
2826 return 1;
2827
2828 /*
2829 * NOTE! This will make us return with VM_FAULT_RETRY, but with
2830 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
2831 * is supposed to work. We have way too many special cases..
2832 */
2833 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2834 return 0;
2835
2836 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2837 if (vmf->flags & FAULT_FLAG_KILLABLE) {
2838 if (__lock_page_killable(page)) {
2839 /*
2840 * We didn't have the right flags to drop the mmap_lock,
2841 * but all fault_handlers only check for fatal signals
2842 * if we return VM_FAULT_RETRY, so we need to drop the
2843 * mmap_lock here and return 0 if we don't have a fpin.
2844 */
2845 if (*fpin == NULL)
2846 mmap_read_unlock(vmf->vma->vm_mm);
2847 return 0;
2848 }
2849 } else
2850 __lock_page(page);
2851 return 1;
2852}
2853
2854
2855/*
2856 * Synchronous readahead happens when we don't even find a page in the page
2857 * cache at all. We don't want to perform IO under the mmap_lock, so if we have
2858 * to drop the mmap_lock we return the file that was pinned in order for us to do
2859 * that. If we didn't pin a file then we return NULL. The file that is
2860 * returned needs to be fput()'ed when we're done with it.
2861 */
2862static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
2863{
2864 struct file *file = vmf->vma->vm_file;
2865 struct file_ra_state *ra = &file->f_ra;
2866 struct address_space *mapping = file->f_mapping;
2867 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
2868 struct file *fpin = NULL;
2869 unsigned int mmap_miss;
2870
2871 /* If we don't want any read-ahead, don't bother */
2872 if (vmf->vma->vm_flags & VM_RAND_READ)
2873 return fpin;
2874 if (!ra->ra_pages)
2875 return fpin;
2876
2877 if (vmf->vma->vm_flags & VM_SEQ_READ) {
2878 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2879 page_cache_sync_ra(&ractl, ra->ra_pages);
2880 return fpin;
2881 }
2882
2883 /* Avoid banging the cache line if not needed */
2884 mmap_miss = READ_ONCE(ra->mmap_miss);
2885 if (mmap_miss < MMAP_LOTSAMISS * 10)
2886 WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
2887
2888 /*
2889 * Do we miss much more than hit in this file? If so,
2890 * stop bothering with read-ahead. It will only hurt.
2891 */
2892 if (mmap_miss > MMAP_LOTSAMISS)
2893 return fpin;
2894
2895 /*
2896 * mmap read-around
2897 */
2898 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2899 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
2900 ra->size = ra->ra_pages;
2901 ra->async_size = ra->ra_pages / 4;
2902 ractl._index = ra->start;
2903 do_page_cache_ra(&ractl, ra->size, ra->async_size);
2904 return fpin;
2905}
2906
2907/*
2908 * Asynchronous readahead happens when we find the page with PG_readahead set,
2909 * so we want to possibly extend the readahead further. We return the file that
2910 * was pinned if we have to drop the mmap_lock in order to do IO.
2911 */
2912static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
2913 struct page *page)
2914{
2915 struct file *file = vmf->vma->vm_file;
2916 struct file_ra_state *ra = &file->f_ra;
2917 struct address_space *mapping = file->f_mapping;
2918 struct file *fpin = NULL;
2919 unsigned int mmap_miss;
2920 pgoff_t offset = vmf->pgoff;
2921
2922 /* If we don't want any read-ahead, don't bother */
2923 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
2924 return fpin;
2925 mmap_miss = READ_ONCE(ra->mmap_miss);
2926 if (mmap_miss)
2927 WRITE_ONCE(ra->mmap_miss, --mmap_miss);
2928 if (PageReadahead(page)) {
2929 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2930 page_cache_async_readahead(mapping, ra, file,
2931 page, offset, ra->ra_pages);
2932 }
2933 return fpin;
2934}
2935
2936/**
2937 * filemap_fault - read in file data for page fault handling
2938 * @vmf: struct vm_fault containing details of the fault
2939 *
2940 * filemap_fault() is invoked via the vma operations vector for a
2941 * mapped memory region to read in file data during a page fault.
2942 *
2943 * The goto's are kind of ugly, but this streamlines the normal case of having
2944 * it in the page cache, and handles the special cases reasonably without
2945 * having a lot of duplicated code.
2946 *
2947 * vma->vm_mm->mmap_lock must be held on entry.
2948 *
2949 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
2950 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
2951 *
2952 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
2953 * has not been released.
2954 *
2955 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
2956 *
2957 * Return: bitwise-OR of %VM_FAULT_ codes.
2958 */
2959vm_fault_t filemap_fault(struct vm_fault *vmf)
2960{
2961 int error;
2962 struct file *file = vmf->vma->vm_file;
2963 struct file *fpin = NULL;
2964 struct address_space *mapping = file->f_mapping;
2965 struct inode *inode = mapping->host;
2966 pgoff_t offset = vmf->pgoff;
2967 pgoff_t max_off;
2968 struct page *page;
2969 vm_fault_t ret = 0;
2970
2971 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2972 if (unlikely(offset >= max_off))
2973 return VM_FAULT_SIGBUS;
2974
2975 /*
2976 * Do we have something in the page cache already?
2977 */
2978 page = find_get_page(mapping, offset);
2979 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
2980 /*
2981 * We found the page, so try async readahead before
2982 * waiting for the lock.
2983 */
2984 fpin = do_async_mmap_readahead(vmf, page);
2985 } else if (!page) {
2986 /* No page in the page cache at all */
2987 count_vm_event(PGMAJFAULT);
2988 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
2989 ret = VM_FAULT_MAJOR;
2990 fpin = do_sync_mmap_readahead(vmf);
2991retry_find:
2992 page = pagecache_get_page(mapping, offset,
2993 FGP_CREAT|FGP_FOR_MMAP,
2994 vmf->gfp_mask);
2995 if (!page) {
2996 if (fpin)
2997 goto out_retry;
2998 return VM_FAULT_OOM;
2999 }
3000 }
3001
3002 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
3003 goto out_retry;
3004
3005 /* Did it get truncated? */
3006 if (unlikely(compound_head(page)->mapping != mapping)) {
3007 unlock_page(page);
3008 put_page(page);
3009 goto retry_find;
3010 }
3011 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
3012
3013 /*
3014	 * We have a locked page in the page cache; now we need to check
3015 * that it's up-to-date. If not, it is going to be due to an error.
3016 */
3017 if (unlikely(!PageUptodate(page)))
3018 goto page_not_uptodate;
3019
3020 /*
3021 * We've made it this far and we had to drop our mmap_lock, now is the
3022 * time to return to the upper layer and have it re-find the vma and
3023 * redo the fault.
3024 */
3025 if (fpin) {
3026 unlock_page(page);
3027 goto out_retry;
3028 }
3029
3030 /*
3031 * Found the page and have a reference on it.
3032 * We must recheck i_size under page lock.
3033 */
3034 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3035 if (unlikely(offset >= max_off)) {
3036 unlock_page(page);
3037 put_page(page);
3038 return VM_FAULT_SIGBUS;
3039 }
3040
3041 vmf->page = page;
3042 return ret | VM_FAULT_LOCKED;
3043
3044page_not_uptodate:
3045 /*
3046 * Umm, take care of errors if the page isn't up-to-date.
3047 * Try to re-read it _once_. We do this synchronously,
3048 * because there really aren't any performance issues here
3049 * and we need to check for errors.
3050 */
3051 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3052 error = filemap_read_page(file, mapping, page);
3053 if (fpin)
3054 goto out_retry;
3055 put_page(page);
3056
3057 if (!error || error == AOP_TRUNCATED_PAGE)
3058 goto retry_find;
3059
3060 return VM_FAULT_SIGBUS;
3061
3062out_retry:
3063 /*
3064 * We dropped the mmap_lock, we need to return to the fault handler to
3065 * re-find the vma and come back and find our hopefully still populated
3066 * page.
3067 */
3068 if (page)
3069 put_page(page);
3070 if (fpin)
3071 fput(fpin);
3072 return ret | VM_FAULT_RETRY;
3073}
3074EXPORT_SYMBOL(filemap_fault);
3075
3076static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
3077{
3078 struct mm_struct *mm = vmf->vma->vm_mm;
3079
3080 /* Huge page is mapped? No need to proceed. */
3081 if (pmd_trans_huge(*vmf->pmd)) {
3082 unlock_page(page);
3083 put_page(page);
3084 return true;
3085 }
3086
3087 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
3088 vm_fault_t ret = do_set_pmd(vmf, page);
3089 if (!ret) {
3090 /* The page is mapped successfully, reference consumed. */
3091 unlock_page(page);
3092 return true;
3093 }
3094 }
3095
3096 if (pmd_none(*vmf->pmd)) {
3097 vmf->ptl = pmd_lock(mm, vmf->pmd);
3098 if (likely(pmd_none(*vmf->pmd))) {
3099 mm_inc_nr_ptes(mm);
3100 pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
3101 vmf->prealloc_pte = NULL;
3102 }
3103 spin_unlock(vmf->ptl);
3104 }
3105
3106 /* See comment in handle_pte_fault() */
3107 if (pmd_devmap_trans_unstable(vmf->pmd)) {
3108 unlock_page(page);
3109 put_page(page);
3110 return true;
3111 }
3112
3113 return false;
3114}
3115
3116static struct page *next_uptodate_page(struct page *page,
3117 struct address_space *mapping,
3118 struct xa_state *xas, pgoff_t end_pgoff)
3119{
3120 unsigned long max_idx;
3121
3122 do {
3123 if (!page)
3124 return NULL;
3125 if (xas_retry(xas, page))
3126 continue;
3127 if (xa_is_value(page))
3128 continue;
3129 if (PageLocked(page))
3130 continue;
3131 if (!page_cache_get_speculative(page))
3132 continue;
3133 /* Has the page moved or been split? */
3134 if (unlikely(page != xas_reload(xas)))
3135 goto skip;
3136 if (!PageUptodate(page) || PageReadahead(page))
3137 goto skip;
3138 if (PageHWPoison(page))
3139 goto skip;
3140 if (!trylock_page(page))
3141 goto skip;
3142 if (page->mapping != mapping)
3143 goto unlock;
3144 if (!PageUptodate(page))
3145 goto unlock;
3146 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3147 if (xas->xa_index >= max_idx)
3148 goto unlock;
3149 return page;
3150unlock:
3151 unlock_page(page);
3152skip:
3153 put_page(page);
3154 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL);
3155
3156 return NULL;
3157}
3158
3159static inline struct page *first_map_page(struct address_space *mapping,
3160 struct xa_state *xas,
3161 pgoff_t end_pgoff)
3162{
3163 return next_uptodate_page(xas_find(xas, end_pgoff),
3164 mapping, xas, end_pgoff);
3165}
3166
3167static inline struct page *next_map_page(struct address_space *mapping,
3168 struct xa_state *xas,
3169 pgoff_t end_pgoff)
3170{
3171 return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3172 mapping, xas, end_pgoff);
3173}
3174
3175vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3176 pgoff_t start_pgoff, pgoff_t end_pgoff)
3177{
3178 struct vm_area_struct *vma = vmf->vma;
3179 struct file *file = vma->vm_file;
3180 struct address_space *mapping = file->f_mapping;
3181 pgoff_t last_pgoff = start_pgoff;
3182 unsigned long addr;
3183 XA_STATE(xas, &mapping->i_pages, start_pgoff);
3184 struct page *head, *page;
3185 unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3186 vm_fault_t ret = 0;
3187
3188 rcu_read_lock();
3189 head = first_map_page(mapping, &xas, end_pgoff);
3190 if (!head)
3191 goto out;
3192
3193 if (filemap_map_pmd(vmf, head)) {
3194 ret = VM_FAULT_NOPAGE;
3195 goto out;
3196 }
3197
3198 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3199 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3200 do {
3201 page = find_subpage(head, xas.xa_index);
3202 if (PageHWPoison(page))
3203 goto unlock;
3204
3205 if (mmap_miss > 0)
3206 mmap_miss--;
3207
3208 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3209 vmf->pte += xas.xa_index - last_pgoff;
3210 last_pgoff = xas.xa_index;
3211
3212 if (!pte_none(*vmf->pte))
3213 goto unlock;
3214
3215 /* We're about to handle the fault */
3216 if (vmf->address == addr)
3217 ret = VM_FAULT_NOPAGE;
3218
3219 do_set_pte(vmf, page, addr);
3220 /* no need to invalidate: a not-present page won't be cached */
3221 update_mmu_cache(vma, addr, vmf->pte);
3222 unlock_page(head);
3223 continue;
3224unlock:
3225 unlock_page(head);
3226 put_page(head);
3227 } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3228 pte_unmap_unlock(vmf->pte, vmf->ptl);
3229out:
3230 rcu_read_unlock();
3231 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3232 return ret;
3233}
3234EXPORT_SYMBOL(filemap_map_pages);
3235
3236vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3237{
3238 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3239 struct page *page = vmf->page;
3240 vm_fault_t ret = VM_FAULT_LOCKED;
3241
3242 sb_start_pagefault(mapping->host->i_sb);
3243 file_update_time(vmf->vma->vm_file);
3244 lock_page(page);
3245 if (page->mapping != mapping) {
3246 unlock_page(page);
3247 ret = VM_FAULT_NOPAGE;
3248 goto out;
3249 }
3250 /*
3251 * We mark the page dirty already here so that when freeze is in
3252 * progress, we are guaranteed that writeback during freezing will
3253 * see the dirty page and writeprotect it again.
3254 */
3255 set_page_dirty(page);
3256 wait_for_stable_page(page);
3257out:
3258 sb_end_pagefault(mapping->host->i_sb);
3259 return ret;
3260}
3261
3262const struct vm_operations_struct generic_file_vm_ops = {
3263 .fault = filemap_fault,
3264 .map_pages = filemap_map_pages,
3265 .page_mkwrite = filemap_page_mkwrite,
3266};
3267
3268/* This is used for a general mmap of a disk file */
3269
3270int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3271{
3272 struct address_space *mapping = file->f_mapping;
3273
3274 if (!mapping->a_ops->readpage)
3275 return -ENOEXEC;
3276 file_accessed(file);
3277 vma->vm_ops = &generic_file_vm_ops;
3278 return 0;
3279}
3280
3281/*
3282 * This is for filesystems which do not implement ->writepage.
3283 */
3284int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3285{
3286 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3287 return -EINVAL;
3288 return generic_file_mmap(file, vma);
3289}
3290#else
3291vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3292{
3293 return VM_FAULT_SIGBUS;
3294}
3295int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3296{
3297 return -ENOSYS;
3298}
3299int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3300{
3301 return -ENOSYS;
3302}
3303#endif /* CONFIG_MMU */
3304
3305EXPORT_SYMBOL(filemap_page_mkwrite);
3306EXPORT_SYMBOL(generic_file_mmap);
3307EXPORT_SYMBOL(generic_file_readonly_mmap);
3308
3309static struct page *wait_on_page_read(struct page *page)
3310{
3311 if (!IS_ERR(page)) {
3312 wait_on_page_locked(page);
3313 if (!PageUptodate(page)) {
3314 put_page(page);
3315 page = ERR_PTR(-EIO);
3316 }
3317 }
3318 return page;
3319}
3320
3321static struct page *do_read_cache_page(struct address_space *mapping,
3322 pgoff_t index,
3323 int (*filler)(void *, struct page *),
3324 void *data,
3325 gfp_t gfp)
3326{
3327 struct page *page;
3328 int err;
3329repeat:
3330 page = find_get_page(mapping, index);
3331 if (!page) {
3332 page = __page_cache_alloc(gfp);
3333 if (!page)
3334 return ERR_PTR(-ENOMEM);
3335 err = add_to_page_cache_lru(page, mapping, index, gfp);
3336 if (unlikely(err)) {
3337 put_page(page);
3338 if (err == -EEXIST)
3339 goto repeat;
3340 /* Presumably ENOMEM for xarray node */
3341 return ERR_PTR(err);
3342 }
3343
3344filler:
3345 if (filler)
3346 err = filler(data, page);
3347 else
3348 err = mapping->a_ops->readpage(data, page);
3349
3350 if (err < 0) {
3351 put_page(page);
3352 return ERR_PTR(err);
3353 }
3354
3355 page = wait_on_page_read(page);
3356 if (IS_ERR(page))
3357 return page;
3358 goto out;
3359 }
3360 if (PageUptodate(page))
3361 goto out;
3362
3363 /*
3364	 * Page is not up to date and may be locked due to one of the following cases:
3365	 * case a: Page is being filled and the page lock is held
3366 * case b: Read/write error clearing the page uptodate status
3367 * case c: Truncation in progress (page locked)
3368 * case d: Reclaim in progress
3369 *
3370 * Case a, the page will be up to date when the page is unlocked.
3371 * There is no need to serialise on the page lock here as the page
3372 * is pinned so the lock gives no additional protection. Even if the
3373 * page is truncated, the data is still valid if PageUptodate as
3374	 * it's a read vs truncate race.
3375 * Case b, the page will not be up to date
3376 * Case c, the page may be truncated but in itself, the data may still
3377 * be valid after IO completes as it's a read vs truncate race. The
3378 * operation must restart if the page is not uptodate on unlock but
3379 * otherwise serialising on page lock to stabilise the mapping gives
3380 * no additional guarantees to the caller as the page lock is
3381 * released before return.
3382 * Case d, similar to truncation. If reclaim holds the page lock, it
3383 * will be a race with remove_mapping that determines if the mapping
3384 * is valid on unlock but otherwise the data is valid and there is
3385 * no need to serialise with page lock.
3386 *
3387 * As the page lock gives no additional guarantee, we optimistically
3388 * wait on the page to be unlocked and check if it's up to date and
3389 * use the page if it is. Otherwise, the page lock is required to
3390 * distinguish between the different cases. The motivation is that we
3391 * avoid spurious serialisations and wakeups when multiple processes
3392 * wait on the same page for IO to complete.
3393 */
3394 wait_on_page_locked(page);
3395 if (PageUptodate(page))
3396 goto out;
3397
3398 /* Distinguish between all the cases under the safety of the lock */
3399 lock_page(page);
3400
3401 /* Case c or d, restart the operation */
3402 if (!page->mapping) {
3403 unlock_page(page);
3404 put_page(page);
3405 goto repeat;
3406 }
3407
3408 /* Someone else locked and filled the page in a very small window */
3409 if (PageUptodate(page)) {
3410 unlock_page(page);
3411 goto out;
3412 }
3413
3414 /*
3415 * A previous I/O error may have been due to temporary
3416 * failures.
3417	 * Clear the page error before the actual read; PG_error will be
3418	 * set again if readpage fails.
3419 */
3420 ClearPageError(page);
3421 goto filler;
3422
3423out:
3424 mark_page_accessed(page);
3425 return page;
3426}
3427
3428/**
3429 * read_cache_page - read into page cache, fill it if needed
3430 * @mapping: the page's address_space
3431 * @index: the page index
3432 * @filler: function to perform the read
3433 * @data: first arg to filler(data, page) function, often left as NULL
3434 *
3435 * Read into the page cache. If a page already exists, and PageUptodate() is
3436 * not set, try to fill the page and wait for it to become unlocked.
3437 *
3438 * If the page does not get brought uptodate, return -EIO.
3439 *
3440 * Return: up to date page on success, ERR_PTR() on failure.
3441 */
3442struct page *read_cache_page(struct address_space *mapping,
3443 pgoff_t index,
3444 int (*filler)(void *, struct page *),
3445 void *data)
3446{
3447 return do_read_cache_page(mapping, index, filler, data,
3448 mapping_gfp_mask(mapping));
3449}
3450EXPORT_SYMBOL(read_cache_page);
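
/*
 * Example usage (an illustrative sketch): read a single page of file data
 * using the default ->readpage() as the filler, i.e. what the
 * read_mapping_page() wrapper does.  The "file" argument is passed through
 * to ->readpage() and many callers simply pass NULL.
 *
 *	struct page *page;
 *
 *	page = read_cache_page(mapping, index, NULL, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...page is Uptodate, with a reference held...
 *	put_page(page);
 */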
3451
3452/**
3453 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3454 * @mapping: the page's address_space
3455 * @index: the page index
3456 * @gfp: the page allocator flags to use if allocating
3457 *
3458 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3459 * any new page allocations done using the specified allocation flags.
3460 *
3461 * If the page does not get brought uptodate, return -EIO.
3462 *
3463 * Return: up to date page on success, ERR_PTR() on failure.
3464 */
3465struct page *read_cache_page_gfp(struct address_space *mapping,
3466 pgoff_t index,
3467 gfp_t gfp)
3468{
3469 return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3470}
3471EXPORT_SYMBOL(read_cache_page_gfp);
3472
3473int pagecache_write_begin(struct file *file, struct address_space *mapping,
3474 loff_t pos, unsigned len, unsigned flags,
3475 struct page **pagep, void **fsdata)
3476{
3477 const struct address_space_operations *aops = mapping->a_ops;
3478
3479 return aops->write_begin(file, mapping, pos, len, flags,
3480 pagep, fsdata);
3481}
3482EXPORT_SYMBOL(pagecache_write_begin);
3483
3484int pagecache_write_end(struct file *file, struct address_space *mapping,
3485 loff_t pos, unsigned len, unsigned copied,
3486 struct page *page, void *fsdata)
3487{
3488 const struct address_space_operations *aops = mapping->a_ops;
3489
3490 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
3491}
3492EXPORT_SYMBOL(pagecache_write_end);
3493
3494/*
3495 * Warn about a page cache invalidation failure during a direct I/O write.
3496 */
3497void dio_warn_stale_pagecache(struct file *filp)
3498{
3499 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3500 char pathname[128];
3501 char *path;
3502
3503 errseq_set(&filp->f_mapping->wb_err, -EIO);
3504 if (__ratelimit(&_rs)) {
3505 path = file_path(filp, pathname, sizeof(pathname));
3506 if (IS_ERR(path))
3507 path = "(unknown)";
3508 pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
3509 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3510 current->comm);
3511 }
3512}
3513
3514ssize_t
3515generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3516{
3517 struct file *file = iocb->ki_filp;
3518 struct address_space *mapping = file->f_mapping;
3519 struct inode *inode = mapping->host;
3520 loff_t pos = iocb->ki_pos;
3521 ssize_t written;
3522 size_t write_len;
3523 pgoff_t end;
3524
3525 write_len = iov_iter_count(from);
3526 end = (pos + write_len - 1) >> PAGE_SHIFT;
3527
3528 if (iocb->ki_flags & IOCB_NOWAIT) {
3529		/* If there are pages to writeback, bail out with -EAGAIN */
3530 if (filemap_range_has_page(file->f_mapping, pos,
3531 pos + write_len - 1))
3532 return -EAGAIN;
3533 } else {
3534 written = filemap_write_and_wait_range(mapping, pos,
3535 pos + write_len - 1);
3536 if (written)
3537 goto out;
3538 }
3539
3540 /*
3541 * After a write we want buffered reads to be sure to go to disk to get
3542 * the new data. We invalidate clean cached page from the region we're
3543 * about to write. We do this *before* the write so that we can return
3544 * without clobbering -EIOCBQUEUED from ->direct_IO().
3545 */
3546 written = invalidate_inode_pages2_range(mapping,
3547 pos >> PAGE_SHIFT, end);
3548 /*
3549	 * If a page cannot be invalidated, return 0 to fall back
3550 * to buffered write.
3551 */
3552 if (written) {
3553 if (written == -EBUSY)
3554 return 0;
3555 goto out;
3556 }
3557
3558 written = mapping->a_ops->direct_IO(iocb, from);
3559
3560 /*
3561 * Finally, try again to invalidate clean pages which might have been
3562 * cached by non-direct readahead, or faulted in by get_user_pages()
3563 * if the source of the write was an mmap'ed region of the file
3564 * we're writing. Either one is a pretty crazy thing to do,
3565 * so we don't support it 100%. If this invalidation
3566 * fails, tough, the write still worked...
3567 *
3568 * Most of the time we do not need this since dio_complete() will do
3569 * the invalidation for us. However there are some file systems that
3570 * do not end up with dio_complete() being called, so let's not break
3571 * them by removing it completely.
3572 *
3573 * Noticeable example is a blkdev_direct_IO().
3574 *
3575 * Skip invalidation for async writes or if mapping has no pages.
3576 */
3577 if (written > 0 && mapping->nrpages &&
3578 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3579 dio_warn_stale_pagecache(file);
3580
3581 if (written > 0) {
3582 pos += written;
3583 write_len -= written;
3584 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3585 i_size_write(inode, pos);
3586 mark_inode_dirty(inode);
3587 }
3588 iocb->ki_pos = pos;
3589 }
3590 if (written != -EIOCBQUEUED)
3591 iov_iter_revert(from, write_len - iov_iter_count(from));
3592out:
3593 return written;
3594}
3595EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
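
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->write_begin() can use grab_cache_page_write_begin() to obtain the
 * locked pagecache page for the range being written; block allocation and
 * zeroing of partially written pages are omitted. The foofs_* name is
 * hypothetical.
 *
 *	static int foofs_write_begin(struct file *file,
 *				     struct address_space *mapping,
 *				     loff_t pos, unsigned len, unsigned flags,
 *				     struct page **pagep, void **fsdata)
 *	{
 *		struct page *page;
 *
 *		page = grab_cache_page_write_begin(mapping,
 *						   pos >> PAGE_SHIFT, flags);
 *		if (!page)
 *			return -ENOMEM;
 *		*pagep = page;
 *		return 0;
 *	}
 */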

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status != copied)) {
			iov_iter_revert(i, copied - max(status, 0L));
			if (unlikely(status < 0))
				break;
		}
		cond_resched();

		if (unlikely(status == 0)) {
			/*
			 * A short copy made ->write_end() reject the
			 * thing entirely. Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);
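
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * only does buffered I/O may call generic_perform_write() directly from
 * its ->write_iter() once generic_write_checks() has passed. The foofs_*
 * name is hypothetical and O_SYNC handling is left out.
 *
 *	static ssize_t foofs_buffered_write_iter(struct kiocb *iocb,
 *						 struct iov_iter *from)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		struct inode *inode = file_inode(file);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = generic_perform_write(file, from, iocb->ki_pos);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			iocb->ki_pos += ret;
 *		return ret;
 *	}
 */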

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 * Return:
 * * negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * number of bytes written, even for truncated writes
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
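
/*
 * Illustrative sketch (not part of the original file): filesystems that are
 * happy with the generic locking and O_SYNC handling usually just wire
 * generic_file_write_iter() into their file_operations; the structure name
 * below is hypothetical.
 *
 *	static const struct file_operations foofs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.fsync		= generic_file_fsync,
 *		.splice_read	= generic_file_splice_read,
 *	};
 */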

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to release any data it holds against the page
 * (presumably at page->private).
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %1 if the release was successful, otherwise return zero.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
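
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * attaches buffer_heads (or other private state) to pagecache pages hooks
 * into this path by providing ->releasepage() in its
 * address_space_operations. A minimal implementation that refuses busy
 * pages and otherwise defers to the buffer layer might look like the
 * hypothetical foofs_releasepage() below.
 *
 *	static int foofs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		if (PageDirty(page) || PageWriteback(page))
 *			return 0;
 *		return try_to_free_buffers(page);
 *	}
 */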
119static void page_cache_delete(struct address_space *mapping,
120 struct page *page, void *shadow)
121{
122 XA_STATE(xas, &mapping->i_pages, page->index);
123 unsigned int nr = 1;
124
125 mapping_set_update(&xas, mapping);
126
127 /* hugetlb pages are represented by a single entry in the xarray */
128 if (!PageHuge(page)) {
129 xas_set_order(&xas, page->index, compound_order(page));
130 nr = compound_nr(page);
131 }
132
133 VM_BUG_ON_PAGE(!PageLocked(page), page);
134 VM_BUG_ON_PAGE(PageTail(page), page);
135 VM_BUG_ON_PAGE(nr != 1 && shadow, page);
136
137 xas_store(&xas, shadow);
138 xas_init_marks(&xas);
139
140 page->mapping = NULL;
141 /* Leave page->index set: truncation lookup relies upon it */
142
143 if (shadow) {
144 mapping->nrexceptional += nr;
145 /*
146 * Make sure the nrexceptional update is committed before
147 * the nrpages update so that final truncate racing
148 * with reclaim does not see both counters 0 at the
149 * same time and miss a shadow entry.
150 */
151 smp_wmb();
152 }
153 mapping->nrpages -= nr;
154}
155
156static void unaccount_page_cache_page(struct address_space *mapping,
157 struct page *page)
158{
159 int nr;
160
161 /*
162 * if we're uptodate, flush out into the cleancache, otherwise
163 * invalidate any existing cleancache entries. We can't leave
164 * stale data around in the cleancache once our page is gone
165 */
166 if (PageUptodate(page) && PageMappedToDisk(page))
167 cleancache_put_page(page);
168 else
169 cleancache_invalidate_page(mapping, page);
170
171 VM_BUG_ON_PAGE(PageTail(page), page);
172 VM_BUG_ON_PAGE(page_mapped(page), page);
173 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
174 int mapcount;
175
176 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
177 current->comm, page_to_pfn(page));
178 dump_page(page, "still mapped when deleted");
179 dump_stack();
180 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
181
182 mapcount = page_mapcount(page);
183 if (mapping_exiting(mapping) &&
184 page_count(page) >= mapcount + 2) {
185 /*
186 * All vmas have already been torn down, so it's
187 * a good bet that actually the page is unmapped,
188 * and we'd prefer not to leak it: if we're wrong,
189 * some other bad page check should catch it later.
190 */
191 page_mapcount_reset(page);
192 page_ref_sub(page, mapcount);
193 }
194 }
195
196 /* hugetlb pages do not participate in page cache accounting. */
197 if (PageHuge(page))
198 return;
199
200 nr = hpage_nr_pages(page);
201
202 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
203 if (PageSwapBacked(page)) {
204 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
205 if (PageTransHuge(page))
206 __dec_node_page_state(page, NR_SHMEM_THPS);
207 } else if (PageTransHuge(page)) {
208 __dec_node_page_state(page, NR_FILE_THPS);
209 filemap_nr_thps_dec(mapping);
210 }
211
212 /*
213 * At this point page must be either written or cleaned by
214 * truncate. Dirty page here signals a bug and loss of
215 * unwritten data.
216 *
217 * This fixes dirty accounting after removing the page entirely
218 * but leaves PageDirty set: it has no effect for truncated
219 * page and anyway will be cleared before returning page into
220 * buddy allocator.
221 */
222 if (WARN_ON_ONCE(PageDirty(page)))
223 account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
224}
225
226/*
227 * Delete a page from the page cache and free it. Caller has to make
228 * sure the page is locked and that nobody else uses it - or that usage
229 * is safe. The caller must hold the i_pages lock.
230 */
231void __delete_from_page_cache(struct page *page, void *shadow)
232{
233 struct address_space *mapping = page->mapping;
234
235 trace_mm_filemap_delete_from_page_cache(page);
236
237 unaccount_page_cache_page(mapping, page);
238 page_cache_delete(mapping, page, shadow);
239}
240
241static void page_cache_free_page(struct address_space *mapping,
242 struct page *page)
243{
244 void (*freepage)(struct page *);
245
246 freepage = mapping->a_ops->freepage;
247 if (freepage)
248 freepage(page);
249
250 if (PageTransHuge(page) && !PageHuge(page)) {
251 page_ref_sub(page, HPAGE_PMD_NR);
252 VM_BUG_ON_PAGE(page_count(page) <= 0, page);
253 } else {
254 put_page(page);
255 }
256}
257
258/**
259 * delete_from_page_cache - delete page from page cache
260 * @page: the page which the kernel is trying to remove from page cache
261 *
262 * This must be called only on pages that have been verified to be in the page
263 * cache and locked. It will never put the page into the free list, the caller
264 * has a reference on the page.
265 */
266void delete_from_page_cache(struct page *page)
267{
268 struct address_space *mapping = page_mapping(page);
269 unsigned long flags;
270
271 BUG_ON(!PageLocked(page));
272 xa_lock_irqsave(&mapping->i_pages, flags);
273 __delete_from_page_cache(page, NULL);
274 xa_unlock_irqrestore(&mapping->i_pages, flags);
275
276 page_cache_free_page(mapping, page);
277}
278EXPORT_SYMBOL(delete_from_page_cache);
279
280/*
281 * page_cache_delete_batch - delete several pages from page cache
282 * @mapping: the mapping to which pages belong
283 * @pvec: pagevec with pages to delete
284 *
285 * The function walks over mapping->i_pages and removes pages passed in @pvec
286 * from the mapping. The function expects @pvec to be sorted by page index
287 * and is optimised for it to be dense.
288 * It tolerates holes in @pvec (mapping entries at those indices are not
289 * modified). The function expects only THP head pages to be present in the
290 * @pvec.
291 *
292 * The function expects the i_pages lock to be held.
293 */
294static void page_cache_delete_batch(struct address_space *mapping,
295 struct pagevec *pvec)
296{
297 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
298 int total_pages = 0;
299 int i = 0;
300 struct page *page;
301
302 mapping_set_update(&xas, mapping);
303 xas_for_each(&xas, page, ULONG_MAX) {
304 if (i >= pagevec_count(pvec))
305 break;
306
307 /* A swap/dax/shadow entry got inserted? Skip it. */
308 if (xa_is_value(page))
309 continue;
310 /*
311 * A page got inserted in our range? Skip it. We have our
312 * pages locked so they are protected from being removed.
313 * If we see a page whose index is higher than ours, it
314 * means our page has been removed, which shouldn't be
315 * possible because we're holding the PageLock.
316 */
317 if (page != pvec->pages[i]) {
318 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
319 page);
320 continue;
321 }
322
323 WARN_ON_ONCE(!PageLocked(page));
324
325 if (page->index == xas.xa_index)
326 page->mapping = NULL;
327 /* Leave page->index set: truncation lookup relies on it */
328
329 /*
330 * Move to the next page in the vector if this is a regular
331 * page or the index is of the last sub-page of this compound
332 * page.
333 */
334 if (page->index + compound_nr(page) - 1 == xas.xa_index)
335 i++;
336 xas_store(&xas, NULL);
337 total_pages++;
338 }
339 mapping->nrpages -= total_pages;
340}
341
342void delete_from_page_cache_batch(struct address_space *mapping,
343 struct pagevec *pvec)
344{
345 int i;
346 unsigned long flags;
347
348 if (!pagevec_count(pvec))
349 return;
350
351 xa_lock_irqsave(&mapping->i_pages, flags);
352 for (i = 0; i < pagevec_count(pvec); i++) {
353 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
354
355 unaccount_page_cache_page(mapping, pvec->pages[i]);
356 }
357 page_cache_delete_batch(mapping, pvec);
358 xa_unlock_irqrestore(&mapping->i_pages, flags);
359
360 for (i = 0; i < pagevec_count(pvec); i++)
361 page_cache_free_page(mapping, pvec->pages[i]);
362}
363
364int filemap_check_errors(struct address_space *mapping)
365{
366 int ret = 0;
367 /* Check for outstanding write errors */
368 if (test_bit(AS_ENOSPC, &mapping->flags) &&
369 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
370 ret = -ENOSPC;
371 if (test_bit(AS_EIO, &mapping->flags) &&
372 test_and_clear_bit(AS_EIO, &mapping->flags))
373 ret = -EIO;
374 return ret;
375}
376EXPORT_SYMBOL(filemap_check_errors);
377
378static int filemap_check_and_keep_errors(struct address_space *mapping)
379{
380 /* Check for outstanding write errors */
381 if (test_bit(AS_EIO, &mapping->flags))
382 return -EIO;
383 if (test_bit(AS_ENOSPC, &mapping->flags))
384 return -ENOSPC;
385 return 0;
386}
387
388/**
389 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
390 * @mapping: address space structure to write
391 * @start: offset in bytes where the range starts
392 * @end: offset in bytes where the range ends (inclusive)
393 * @sync_mode: enable synchronous operation
394 *
395 * Start writeback against all of a mapping's dirty pages that lie
396 * within the byte offsets <start, end> inclusive.
397 *
398 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
399 * opposed to a regular memory cleansing writeback. The difference between
400 * these two operations is that if a dirty page/buffer is encountered, it must
401 * be waited upon, and not just skipped over.
402 *
403 * Return: %0 on success, negative error code otherwise.
404 */
405int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
406 loff_t end, int sync_mode)
407{
408 int ret;
409 struct writeback_control wbc = {
410 .sync_mode = sync_mode,
411 .nr_to_write = LONG_MAX,
412 .range_start = start,
413 .range_end = end,
414 };
415
416 if (!mapping_cap_writeback_dirty(mapping) ||
417 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
418 return 0;
419
420 wbc_attach_fdatawrite_inode(&wbc, mapping->host);
421 ret = do_writepages(mapping, &wbc);
422 wbc_detach_inode(&wbc);
423 return ret;
424}
425
426static inline int __filemap_fdatawrite(struct address_space *mapping,
427 int sync_mode)
428{
429 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
430}
431
432int filemap_fdatawrite(struct address_space *mapping)
433{
434 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
435}
436EXPORT_SYMBOL(filemap_fdatawrite);
437
438int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
439 loff_t end)
440{
441 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
442}
443EXPORT_SYMBOL(filemap_fdatawrite_range);
444
445/**
446 * filemap_flush - mostly a non-blocking flush
447 * @mapping: target address_space
448 *
449 * This is a mostly non-blocking flush. Not suitable for data-integrity
450 * purposes - I/O may not be started against all dirty pages.
451 *
452 * Return: %0 on success, negative error code otherwise.
453 */
454int filemap_flush(struct address_space *mapping)
455{
456 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
457}
458EXPORT_SYMBOL(filemap_flush);
459
460/**
461 * filemap_range_has_page - check if a page exists in range.
462 * @mapping: address space within which to check
463 * @start_byte: offset in bytes where the range starts
464 * @end_byte: offset in bytes where the range ends (inclusive)
465 *
466 * Find at least one page in the range supplied, usually used to check if
467 * direct writing in this range will trigger a writeback.
468 *
469 * Return: %true if at least one page exists in the specified range,
470 * %false otherwise.
471 */
472bool filemap_range_has_page(struct address_space *mapping,
473 loff_t start_byte, loff_t end_byte)
474{
475 struct page *page;
476 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
477 pgoff_t max = end_byte >> PAGE_SHIFT;
478
479 if (end_byte < start_byte)
480 return false;
481
482 rcu_read_lock();
483 for (;;) {
484 page = xas_find(&xas, max);
485 if (xas_retry(&xas, page))
486 continue;
487 /* Shadow entries don't count */
488 if (xa_is_value(page))
489 continue;
490 /*
491 * We don't need to try to pin this page; we're about to
492 * release the RCU lock anyway. It is enough to know that
493 * there was a page here recently.
494 */
495 break;
496 }
497 rcu_read_unlock();
498
499 return page != NULL;
500}
501EXPORT_SYMBOL(filemap_range_has_page);
502
503static void __filemap_fdatawait_range(struct address_space *mapping,
504 loff_t start_byte, loff_t end_byte)
505{
506 pgoff_t index = start_byte >> PAGE_SHIFT;
507 pgoff_t end = end_byte >> PAGE_SHIFT;
508 struct pagevec pvec;
509 int nr_pages;
510
511 if (end_byte < start_byte)
512 return;
513
514 pagevec_init(&pvec);
515 while (index <= end) {
516 unsigned i;
517
518 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
519 end, PAGECACHE_TAG_WRITEBACK);
520 if (!nr_pages)
521 break;
522
523 for (i = 0; i < nr_pages; i++) {
524 struct page *page = pvec.pages[i];
525
526 wait_on_page_writeback(page);
527 ClearPageError(page);
528 }
529 pagevec_release(&pvec);
530 cond_resched();
531 }
532}
533
534/**
535 * filemap_fdatawait_range - wait for writeback to complete
536 * @mapping: address space structure to wait for
537 * @start_byte: offset in bytes where the range starts
538 * @end_byte: offset in bytes where the range ends (inclusive)
539 *
540 * Walk the list of under-writeback pages of the given address space
541 * in the given range and wait for all of them. Check error status of
542 * the address space and return it.
543 *
544 * Since the error status of the address space is cleared by this function,
545 * callers are responsible for checking the return value and handling and/or
546 * reporting the error.
547 *
548 * Return: error status of the address space.
549 */
550int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
551 loff_t end_byte)
552{
553 __filemap_fdatawait_range(mapping, start_byte, end_byte);
554 return filemap_check_errors(mapping);
555}
556EXPORT_SYMBOL(filemap_fdatawait_range);
557
558/**
559 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
560 * @mapping: address space structure to wait for
561 * @start_byte: offset in bytes where the range starts
562 * @end_byte: offset in bytes where the range ends (inclusive)
563 *
564 * Walk the list of under-writeback pages of the given address space in the
565 * given range and wait for all of them. Unlike filemap_fdatawait_range(),
566 * this function does not clear error status of the address space.
567 *
568 * Use this function if callers don't handle errors themselves. Expected
569 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
570 * fsfreeze(8)
571 */
572int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
573 loff_t start_byte, loff_t end_byte)
574{
575 __filemap_fdatawait_range(mapping, start_byte, end_byte);
576 return filemap_check_and_keep_errors(mapping);
577}
578EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
579
580/**
581 * file_fdatawait_range - wait for writeback to complete
582 * @file: file pointing to address space structure to wait for
583 * @start_byte: offset in bytes where the range starts
584 * @end_byte: offset in bytes where the range ends (inclusive)
585 *
586 * Walk the list of under-writeback pages of the address space that file
587 * refers to, in the given range and wait for all of them. Check error
588 * status of the address space vs. the file->f_wb_err cursor and return it.
589 *
590 * Since the error status of the file is advanced by this function,
591 * callers are responsible for checking the return value and handling and/or
592 * reporting the error.
593 *
594 * Return: error status of the address space vs. the file->f_wb_err cursor.
595 */
596int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
597{
598 struct address_space *mapping = file->f_mapping;
599
600 __filemap_fdatawait_range(mapping, start_byte, end_byte);
601 return file_check_and_advance_wb_err(file);
602}
603EXPORT_SYMBOL(file_fdatawait_range);
604
605/**
606 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
607 * @mapping: address space structure to wait for
608 *
609 * Walk the list of under-writeback pages of the given address space
610 * and wait for all of them. Unlike filemap_fdatawait(), this function
611 * does not clear error status of the address space.
612 *
613 * Use this function if callers don't handle errors themselves. Expected
614 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
615 * fsfreeze(8)
616 *
617 * Return: error status of the address space.
618 */
619int filemap_fdatawait_keep_errors(struct address_space *mapping)
620{
621 __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
622 return filemap_check_and_keep_errors(mapping);
623}
624EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
625
626/* Returns true if writeback might be needed or already in progress. */
627static bool mapping_needs_writeback(struct address_space *mapping)
628{
629 if (dax_mapping(mapping))
630 return mapping->nrexceptional;
631
632 return mapping->nrpages;
633}
634
635int filemap_write_and_wait(struct address_space *mapping)
636{
637 int err = 0;
638
639 if (mapping_needs_writeback(mapping)) {
640 err = filemap_fdatawrite(mapping);
641 /*
642 * Even if the above returned error, the pages may be
643 * written partially (e.g. -ENOSPC), so we wait for it.
644 * But the -EIO is special case, it may indicate the worst
645 * thing (e.g. bug) happened, so we avoid waiting for it.
646 */
647 if (err != -EIO) {
648 int err2 = filemap_fdatawait(mapping);
649 if (!err)
650 err = err2;
651 } else {
652 /* Clear any previously stored errors */
653 filemap_check_errors(mapping);
654 }
655 } else {
656 err = filemap_check_errors(mapping);
657 }
658 return err;
659}
660EXPORT_SYMBOL(filemap_write_and_wait);
661
662/**
663 * filemap_write_and_wait_range - write out & wait on a file range
664 * @mapping: the address_space for the pages
665 * @lstart: offset in bytes where the range starts
666 * @lend: offset in bytes where the range ends (inclusive)
667 *
668 * Write out and wait upon file offsets lstart->lend, inclusive.
669 *
670 * Note that @lend is inclusive (describes the last byte to be written) so
671 * that this function can be used to write to the very end-of-file (end = -1).
672 *
673 * Return: error status of the address space.
674 */
675int filemap_write_and_wait_range(struct address_space *mapping,
676 loff_t lstart, loff_t lend)
677{
678 int err = 0;
679
680 if (mapping_needs_writeback(mapping)) {
681 err = __filemap_fdatawrite_range(mapping, lstart, lend,
682 WB_SYNC_ALL);
683 /* See comment of filemap_write_and_wait() */
684 if (err != -EIO) {
685 int err2 = filemap_fdatawait_range(mapping,
686 lstart, lend);
687 if (!err)
688 err = err2;
689 } else {
690 /* Clear any previously stored errors */
691 filemap_check_errors(mapping);
692 }
693 } else {
694 err = filemap_check_errors(mapping);
695 }
696 return err;
697}
698EXPORT_SYMBOL(filemap_write_and_wait_range);
699
700void __filemap_set_wb_err(struct address_space *mapping, int err)
701{
702 errseq_t eseq = errseq_set(&mapping->wb_err, err);
703
704 trace_filemap_set_wb_err(mapping, eseq);
705}
706EXPORT_SYMBOL(__filemap_set_wb_err);
707
708/**
709 * file_check_and_advance_wb_err - report wb error (if any) that was previously
710 * and advance wb_err to current one
711 * @file: struct file on which the error is being reported
712 *
713 * When userland calls fsync (or something like nfsd does the equivalent), we
714 * want to report any writeback errors that occurred since the last fsync (or
715 * since the file was opened if there haven't been any).
716 *
717 * Grab the wb_err from the mapping. If it matches what we have in the file,
718 * then just quickly return 0. The file is all caught up.
719 *
720 * If it doesn't match, then take the mapping value, set the "seen" flag in
721 * it and try to swap it into place. If it works, or another task beat us
722 * to it with the new value, then update the f_wb_err and return the error
723 * portion. The error at this point must be reported via proper channels
724 * (a'la fsync, or NFS COMMIT operation, etc.).
725 *
726 * While we handle mapping->wb_err with atomic operations, the f_wb_err
727 * value is protected by the f_lock since we must ensure that it reflects
728 * the latest value swapped in for this file descriptor.
729 *
730 * Return: %0 on success, negative error code otherwise.
731 */
732int file_check_and_advance_wb_err(struct file *file)
733{
734 int err = 0;
735 errseq_t old = READ_ONCE(file->f_wb_err);
736 struct address_space *mapping = file->f_mapping;
737
738 /* Locklessly handle the common case where nothing has changed */
739 if (errseq_check(&mapping->wb_err, old)) {
740 /* Something changed, must use slow path */
741 spin_lock(&file->f_lock);
742 old = file->f_wb_err;
743 err = errseq_check_and_advance(&mapping->wb_err,
744 &file->f_wb_err);
745 trace_file_check_and_advance_wb_err(file, old);
746 spin_unlock(&file->f_lock);
747 }
748
749 /*
750 * We're mostly using this function as a drop in replacement for
751 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
752 * that the legacy code would have had on these flags.
753 */
754 clear_bit(AS_EIO, &mapping->flags);
755 clear_bit(AS_ENOSPC, &mapping->flags);
756 return err;
757}
758EXPORT_SYMBOL(file_check_and_advance_wb_err);
759
760/**
761 * file_write_and_wait_range - write out & wait on a file range
762 * @file: file pointing to address_space with pages
763 * @lstart: offset in bytes where the range starts
764 * @lend: offset in bytes where the range ends (inclusive)
765 *
766 * Write out and wait upon file offsets lstart->lend, inclusive.
767 *
768 * Note that @lend is inclusive (describes the last byte to be written) so
769 * that this function can be used to write to the very end-of-file (end = -1).
770 *
771 * After writing out and waiting on the data, we check and advance the
772 * f_wb_err cursor to the latest value, and return any errors detected there.
773 *
774 * Return: %0 on success, negative error code otherwise.
775 */
776int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
777{
778 int err = 0, err2;
779 struct address_space *mapping = file->f_mapping;
780
781 if (mapping_needs_writeback(mapping)) {
782 err = __filemap_fdatawrite_range(mapping, lstart, lend,
783 WB_SYNC_ALL);
784 /* See comment of filemap_write_and_wait() */
785 if (err != -EIO)
786 __filemap_fdatawait_range(mapping, lstart, lend);
787 }
788 err2 = file_check_and_advance_wb_err(file);
789 if (!err)
790 err = err2;
791 return err;
792}
793EXPORT_SYMBOL(file_write_and_wait_range);
794
795/**
796 * replace_page_cache_page - replace a pagecache page with a new one
797 * @old: page to be replaced
798 * @new: page to replace with
799 * @gfp_mask: allocation mode
800 *
801 * This function replaces a page in the pagecache with a new one. On
802 * success it acquires the pagecache reference for the new page and
803 * drops it for the old page. Both the old and new pages must be
804 * locked. This function does not add the new page to the LRU, the
805 * caller must do that.
806 *
807 * The remove + add is atomic. This function cannot fail.
808 *
809 * Return: %0
810 */
811int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
812{
813 struct address_space *mapping = old->mapping;
814 void (*freepage)(struct page *) = mapping->a_ops->freepage;
815 pgoff_t offset = old->index;
816 XA_STATE(xas, &mapping->i_pages, offset);
817 unsigned long flags;
818
819 VM_BUG_ON_PAGE(!PageLocked(old), old);
820 VM_BUG_ON_PAGE(!PageLocked(new), new);
821 VM_BUG_ON_PAGE(new->mapping, new);
822
823 get_page(new);
824 new->mapping = mapping;
825 new->index = offset;
826
827 xas_lock_irqsave(&xas, flags);
828 xas_store(&xas, new);
829
830 old->mapping = NULL;
831 /* hugetlb pages do not participate in page cache accounting. */
832 if (!PageHuge(old))
833 __dec_node_page_state(new, NR_FILE_PAGES);
834 if (!PageHuge(new))
835 __inc_node_page_state(new, NR_FILE_PAGES);
836 if (PageSwapBacked(old))
837 __dec_node_page_state(new, NR_SHMEM);
838 if (PageSwapBacked(new))
839 __inc_node_page_state(new, NR_SHMEM);
840 xas_unlock_irqrestore(&xas, flags);
841 mem_cgroup_migrate(old, new);
842 if (freepage)
843 freepage(old);
844 put_page(old);
845
846 return 0;
847}
848EXPORT_SYMBOL_GPL(replace_page_cache_page);
849
850static int __add_to_page_cache_locked(struct page *page,
851 struct address_space *mapping,
852 pgoff_t offset, gfp_t gfp_mask,
853 void **shadowp)
854{
855 XA_STATE(xas, &mapping->i_pages, offset);
856 int huge = PageHuge(page);
857 struct mem_cgroup *memcg;
858 int error;
859 void *old;
860
861 VM_BUG_ON_PAGE(!PageLocked(page), page);
862 VM_BUG_ON_PAGE(PageSwapBacked(page), page);
863 mapping_set_update(&xas, mapping);
864
865 if (!huge) {
866 error = mem_cgroup_try_charge(page, current->mm,
867 gfp_mask, &memcg, false);
868 if (error)
869 return error;
870 }
871
872 get_page(page);
873 page->mapping = mapping;
874 page->index = offset;
875
876 do {
877 xas_lock_irq(&xas);
878 old = xas_load(&xas);
879 if (old && !xa_is_value(old))
880 xas_set_err(&xas, -EEXIST);
881 xas_store(&xas, page);
882 if (xas_error(&xas))
883 goto unlock;
884
885 if (xa_is_value(old)) {
886 mapping->nrexceptional--;
887 if (shadowp)
888 *shadowp = old;
889 }
890 mapping->nrpages++;
891
892 /* hugetlb pages do not participate in page cache accounting */
893 if (!huge)
894 __inc_node_page_state(page, NR_FILE_PAGES);
895unlock:
896 xas_unlock_irq(&xas);
897 } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
898
899 if (xas_error(&xas))
900 goto error;
901
902 if (!huge)
903 mem_cgroup_commit_charge(page, memcg, false, false);
904 trace_mm_filemap_add_to_page_cache(page);
905 return 0;
906error:
907 page->mapping = NULL;
908 /* Leave page->index set: truncation relies upon it */
909 if (!huge)
910 mem_cgroup_cancel_charge(page, memcg, false);
911 put_page(page);
912 return xas_error(&xas);
913}
914ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
915
916/**
917 * add_to_page_cache_locked - add a locked page to the pagecache
918 * @page: page to add
919 * @mapping: the page's address_space
920 * @offset: page index
921 * @gfp_mask: page allocation mode
922 *
923 * This function is used to add a page to the pagecache. It must be locked.
924 * This function does not add the page to the LRU. The caller must do that.
925 *
926 * Return: %0 on success, negative error code otherwise.
927 */
928int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
929 pgoff_t offset, gfp_t gfp_mask)
930{
931 return __add_to_page_cache_locked(page, mapping, offset,
932 gfp_mask, NULL);
933}
934EXPORT_SYMBOL(add_to_page_cache_locked);
935
936int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
937 pgoff_t offset, gfp_t gfp_mask)
938{
939 void *shadow = NULL;
940 int ret;
941
942 __SetPageLocked(page);
943 ret = __add_to_page_cache_locked(page, mapping, offset,
944 gfp_mask, &shadow);
945 if (unlikely(ret))
946 __ClearPageLocked(page);
947 else {
948 /*
949 * The page might have been evicted from cache only
950 * recently, in which case it should be activated like
951 * any other repeatedly accessed page.
952 * The exception is pages getting rewritten; evicting other
953 * data from the working set, only to cache data that will
954 * get overwritten with something else, is a waste of memory.
955 */
956 WARN_ON_ONCE(PageActive(page));
957 if (!(gfp_mask & __GFP_WRITE) && shadow)
958 workingset_refault(page, shadow);
959 lru_cache_add(page);
960 }
961 return ret;
962}
963EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
964
965#ifdef CONFIG_NUMA
966struct page *__page_cache_alloc(gfp_t gfp)
967{
968 int n;
969 struct page *page;
970
971 if (cpuset_do_page_mem_spread()) {
972 unsigned int cpuset_mems_cookie;
973 do {
974 cpuset_mems_cookie = read_mems_allowed_begin();
975 n = cpuset_mem_spread_node();
976 page = __alloc_pages_node(n, gfp, 0);
977 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
978
979 return page;
980 }
981 return alloc_pages(gfp, 0);
982}
983EXPORT_SYMBOL(__page_cache_alloc);
984#endif
985
986/*
987 * In order to wait for pages to become available there must be
988 * waitqueues associated with pages. By using a hash table of
989 * waitqueues where the bucket discipline is to maintain all
990 * waiters on the same queue and wake all when any of the pages
991 * become available, and for the woken contexts to check to be
992 * sure the appropriate page became available, this saves space
993 * at a cost of "thundering herd" phenomena during rare hash
994 * collisions.
995 */
996#define PAGE_WAIT_TABLE_BITS 8
997#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
998static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
999
1000static wait_queue_head_t *page_waitqueue(struct page *page)
1001{
1002 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
1003}
1004
1005void __init pagecache_init(void)
1006{
1007 int i;
1008
1009 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1010 init_waitqueue_head(&page_wait_table[i]);
1011
1012 page_writeback_init();
1013}
1014
1015/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
1016struct wait_page_key {
1017 struct page *page;
1018 int bit_nr;
1019 int page_match;
1020};
1021
1022struct wait_page_queue {
1023 struct page *page;
1024 int bit_nr;
1025 wait_queue_entry_t wait;
1026};
1027
1028static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1029{
1030 struct wait_page_key *key = arg;
1031 struct wait_page_queue *wait_page
1032 = container_of(wait, struct wait_page_queue, wait);
1033
1034 if (wait_page->page != key->page)
1035 return 0;
1036 key->page_match = 1;
1037
1038 if (wait_page->bit_nr != key->bit_nr)
1039 return 0;
1040
1041 /*
1042 * Stop walking if it's locked.
1043 * Is this safe if put_and_wait_on_page_locked() is in use?
1044 * Yes: the waker must hold a reference to this page, and if PG_locked
1045 * has now already been set by another task, that task must also hold
1046 * a reference to the *same usage* of this page; so there is no need
1047 * to walk on to wake even the put_and_wait_on_page_locked() callers.
1048 */
1049 if (test_bit(key->bit_nr, &key->page->flags))
1050 return -1;
1051
1052 return autoremove_wake_function(wait, mode, sync, key);
1053}
1054
1055static void wake_up_page_bit(struct page *page, int bit_nr)
1056{
1057 wait_queue_head_t *q = page_waitqueue(page);
1058 struct wait_page_key key;
1059 unsigned long flags;
1060 wait_queue_entry_t bookmark;
1061
1062 key.page = page;
1063 key.bit_nr = bit_nr;
1064 key.page_match = 0;
1065
1066 bookmark.flags = 0;
1067 bookmark.private = NULL;
1068 bookmark.func = NULL;
1069 INIT_LIST_HEAD(&bookmark.entry);
1070
1071 spin_lock_irqsave(&q->lock, flags);
1072 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1073
1074 while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1075 /*
1076 * Take a breather from holding the lock,
1077 * allow pages that finish wake up asynchronously
1078 * to acquire the lock and remove themselves
1079 * from wait queue
1080 */
1081 spin_unlock_irqrestore(&q->lock, flags);
1082 cpu_relax();
1083 spin_lock_irqsave(&q->lock, flags);
1084 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1085 }
1086
1087 /*
1088 * It is possible for other pages to have collided on the waitqueue
1089 * hash, so in that case check for a page match. That prevents a long-
1090 * term waiter
1091 *
1092 * It is still possible to miss a case here, when we woke page waiters
1093 * and removed them from the waitqueue, but there are still other
1094 * page waiters.
1095 */
1096 if (!waitqueue_active(q) || !key.page_match) {
1097 ClearPageWaiters(page);
1098 /*
1099 * It's possible to miss clearing Waiters here, when we woke
1100 * our page waiters, but the hashed waitqueue has waiters for
1101 * other pages on it.
1102 *
1103 * That's okay, it's a rare case. The next waker will clear it.
1104 */
1105 }
1106 spin_unlock_irqrestore(&q->lock, flags);
1107}
1108
1109static void wake_up_page(struct page *page, int bit)
1110{
1111 if (!PageWaiters(page))
1112 return;
1113 wake_up_page_bit(page, bit);
1114}
1115
1116/*
1117 * A choice of three behaviors for wait_on_page_bit_common():
1118 */
1119enum behavior {
1120 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1121 * __lock_page() waiting on then setting PG_locked.
1122 */
1123 SHARED, /* Hold ref to page and check the bit when woken, like
1124 * wait_on_page_writeback() waiting on PG_writeback.
1125 */
1126 DROP, /* Drop ref to page before wait, no check when woken,
1127 * like put_and_wait_on_page_locked() on PG_locked.
1128 */
1129};
1130
1131static inline int wait_on_page_bit_common(wait_queue_head_t *q,
1132 struct page *page, int bit_nr, int state, enum behavior behavior)
1133{
1134 struct wait_page_queue wait_page;
1135 wait_queue_entry_t *wait = &wait_page.wait;
1136 bool bit_is_set;
1137 bool thrashing = false;
1138 bool delayacct = false;
1139 unsigned long pflags;
1140 int ret = 0;
1141
1142 if (bit_nr == PG_locked &&
1143 !PageUptodate(page) && PageWorkingset(page)) {
1144 if (!PageSwapBacked(page)) {
1145 delayacct_thrashing_start();
1146 delayacct = true;
1147 }
1148 psi_memstall_enter(&pflags);
1149 thrashing = true;
1150 }
1151
1152 init_wait(wait);
1153 wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
1154 wait->func = wake_page_function;
1155 wait_page.page = page;
1156 wait_page.bit_nr = bit_nr;
1157
1158 for (;;) {
1159 spin_lock_irq(&q->lock);
1160
1161 if (likely(list_empty(&wait->entry))) {
1162 __add_wait_queue_entry_tail(q, wait);
1163 SetPageWaiters(page);
1164 }
1165
1166 set_current_state(state);
1167
1168 spin_unlock_irq(&q->lock);
1169
1170 bit_is_set = test_bit(bit_nr, &page->flags);
1171 if (behavior == DROP)
1172 put_page(page);
1173
1174 if (likely(bit_is_set))
1175 io_schedule();
1176
1177 if (behavior == EXCLUSIVE) {
1178 if (!test_and_set_bit_lock(bit_nr, &page->flags))
1179 break;
1180 } else if (behavior == SHARED) {
1181 if (!test_bit(bit_nr, &page->flags))
1182 break;
1183 }
1184
1185 if (signal_pending_state(state, current)) {
1186 ret = -EINTR;
1187 break;
1188 }
1189
1190 if (behavior == DROP) {
1191 /*
1192 * We can no longer safely access page->flags:
1193 * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
1194 * there is a risk of waiting forever on a page reused
1195 * for something that keeps it locked indefinitely.
1196 * But best check for -EINTR above before breaking.
1197 */
1198 break;
1199 }
1200 }
1201
1202 finish_wait(q, wait);
1203
1204 if (thrashing) {
1205 if (delayacct)
1206 delayacct_thrashing_end();
1207 psi_memstall_leave(&pflags);
1208 }
1209
1210 /*
1211 * A signal could leave PageWaiters set. Clearing it here if
1212 * !waitqueue_active would be possible (by open-coding finish_wait),
1213 * but still fail to catch it in the case of wait hash collision. We
1214 * already can fail to clear wait hash collision cases, so don't
1215 * bother with signals either.
1216 */
1217
1218 return ret;
1219}
1220
1221void wait_on_page_bit(struct page *page, int bit_nr)
1222{
1223 wait_queue_head_t *q = page_waitqueue(page);
1224 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1225}
1226EXPORT_SYMBOL(wait_on_page_bit);
1227
1228int wait_on_page_bit_killable(struct page *page, int bit_nr)
1229{
1230 wait_queue_head_t *q = page_waitqueue(page);
1231 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
1232}
1233EXPORT_SYMBOL(wait_on_page_bit_killable);
1234
1235/**
1236 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
1237 * @page: The page to wait for.
1238 *
1239 * The caller should hold a reference on @page. They expect the page to
1240 * become unlocked relatively soon, but do not wish to hold up migration
1241 * (for example) by holding the reference while waiting for the page to
1242 * come unlocked. After this function returns, the caller should not
1243 * dereference @page.
1244 */
1245void put_and_wait_on_page_locked(struct page *page)
1246{
1247 wait_queue_head_t *q;
1248
1249 page = compound_head(page);
1250 q = page_waitqueue(page);
1251 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
1252}
1253
1254/**
1255 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1256 * @page: Page defining the wait queue of interest
1257 * @waiter: Waiter to add to the queue
1258 *
1259 * Add an arbitrary @waiter to the wait queue for the nominated @page.
1260 */
1261void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
1262{
1263 wait_queue_head_t *q = page_waitqueue(page);
1264 unsigned long flags;
1265
1266 spin_lock_irqsave(&q->lock, flags);
1267 __add_wait_queue_entry_tail(q, waiter);
1268 SetPageWaiters(page);
1269 spin_unlock_irqrestore(&q->lock, flags);
1270}
1271EXPORT_SYMBOL_GPL(add_page_wait_queue);
1272
1273#ifndef clear_bit_unlock_is_negative_byte
1274
1275/*
1276 * PG_waiters is the high bit in the same byte as PG_lock.
1277 *
1278 * On x86 (and on many other architectures), we can clear PG_lock and
1279 * test the sign bit at the same time. But if the architecture does
1280 * not support that special operation, we just do this all by hand
1281 * instead.
1282 *
1283 * The read of PG_waiters has to be after (or concurrently with) PG_locked
1284 * being cleared, but a memory barrier should be unneccssary since it is
1285 * in the same byte as PG_locked.
1286 */
1287static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1288{
1289 clear_bit_unlock(nr, mem);
1290 /* smp_mb__after_atomic(); */
1291 return test_bit(PG_waiters, mem);
1292}
1293
1294#endif
1295
1296/**
1297 * unlock_page - unlock a locked page
1298 * @page: the page
1299 *
1300 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
1301 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
1302 * mechanism between PageLocked pages and PageWriteback pages is shared.
1303 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
1304 *
1305 * Note that this depends on PG_waiters being the sign bit in the byte
1306 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
1307 * clear the PG_locked bit and test PG_waiters at the same time fairly
1308 * portably (architectures that do LL/SC can test any bit, while x86 can
1309 * test the sign bit).
1310 */
1311void unlock_page(struct page *page)
1312{
1313 BUILD_BUG_ON(PG_waiters != 7);
1314 page = compound_head(page);
1315 VM_BUG_ON_PAGE(!PageLocked(page), page);
1316 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
1317 wake_up_page_bit(page, PG_locked);
1318}
1319EXPORT_SYMBOL(unlock_page);
1320
1321/**
1322 * end_page_writeback - end writeback against a page
1323 * @page: the page
1324 */
1325void end_page_writeback(struct page *page)
1326{
1327 /*
1328 * TestClearPageReclaim could be used here but it is an atomic
1329 * operation and overkill in this particular case. Failing to
1330 * shuffle a page marked for immediate reclaim is too mild to
1331 * justify taking an atomic operation penalty at the end of
1332 * ever page writeback.
1333 */
1334 if (PageReclaim(page)) {
1335 ClearPageReclaim(page);
1336 rotate_reclaimable_page(page);
1337 }
1338
1339 if (!test_clear_page_writeback(page))
1340 BUG();
1341
1342 smp_mb__after_atomic();
1343 wake_up_page(page, PG_writeback);
1344}
1345EXPORT_SYMBOL(end_page_writeback);
1346
1347/*
1348 * After completing I/O on a page, call this routine to update the page
1349 * flags appropriately
1350 */
1351void page_endio(struct page *page, bool is_write, int err)
1352{
1353 if (!is_write) {
1354 if (!err) {
1355 SetPageUptodate(page);
1356 } else {
1357 ClearPageUptodate(page);
1358 SetPageError(page);
1359 }
1360 unlock_page(page);
1361 } else {
1362 if (err) {
1363 struct address_space *mapping;
1364
1365 SetPageError(page);
1366 mapping = page_mapping(page);
1367 if (mapping)
1368 mapping_set_error(mapping, err);
1369 }
1370 end_page_writeback(page);
1371 }
1372}
1373EXPORT_SYMBOL_GPL(page_endio);
1374
1375/**
1376 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1377 * @__page: the page to lock
1378 */
1379void __lock_page(struct page *__page)
1380{
1381 struct page *page = compound_head(__page);
1382 wait_queue_head_t *q = page_waitqueue(page);
1383 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
1384 EXCLUSIVE);
1385}
1386EXPORT_SYMBOL(__lock_page);
1387
1388int __lock_page_killable(struct page *__page)
1389{
1390 struct page *page = compound_head(__page);
1391 wait_queue_head_t *q = page_waitqueue(page);
1392 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
1393 EXCLUSIVE);
1394}
1395EXPORT_SYMBOL_GPL(__lock_page_killable);
1396
1397/*
1398 * Return values:
1399 * 1 - page is locked; mmap_sem is still held.
1400 * 0 - page is not locked.
1401 * mmap_sem has been released (up_read()), unless flags had both
1402 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1403 * which case mmap_sem is still held.
1404 *
1405 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
1406 * with the page locked and the mmap_sem unperturbed.
1407 */
1408int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
1409 unsigned int flags)
1410{
1411 if (flags & FAULT_FLAG_ALLOW_RETRY) {
1412 /*
1413 * CAUTION! In this case, mmap_sem is not released
1414 * even though return 0.
1415 */
1416 if (flags & FAULT_FLAG_RETRY_NOWAIT)
1417 return 0;
1418
1419 up_read(&mm->mmap_sem);
1420 if (flags & FAULT_FLAG_KILLABLE)
1421 wait_on_page_locked_killable(page);
1422 else
1423 wait_on_page_locked(page);
1424 return 0;
1425 } else {
1426 if (flags & FAULT_FLAG_KILLABLE) {
1427 int ret;
1428
1429 ret = __lock_page_killable(page);
1430 if (ret) {
1431 up_read(&mm->mmap_sem);
1432 return 0;
1433 }
1434 } else
1435 __lock_page(page);
1436 return 1;
1437 }
1438}
1439
1440/**
1441 * page_cache_next_miss() - Find the next gap in the page cache.
1442 * @mapping: Mapping.
1443 * @index: Index.
1444 * @max_scan: Maximum range to search.
1445 *
1446 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1447 * gap with the lowest index.
1448 *
1449 * This function may be called under the rcu_read_lock. However, this will
1450 * not atomically search a snapshot of the cache at a single point in time.
1451 * For example, if a gap is created at index 5, then subsequently a gap is
1452 * created at index 10, page_cache_next_miss covering both indices may
1453 * return 10 if called under the rcu_read_lock.
1454 *
1455 * Return: The index of the gap if found, otherwise an index outside the
1456 * range specified (in which case 'return - index >= max_scan' will be true).
1457 * In the rare case of index wrap-around, 0 will be returned.
1458 */
1459pgoff_t page_cache_next_miss(struct address_space *mapping,
1460 pgoff_t index, unsigned long max_scan)
1461{
1462 XA_STATE(xas, &mapping->i_pages, index);
1463
1464 while (max_scan--) {
1465 void *entry = xas_next(&xas);
1466 if (!entry || xa_is_value(entry))
1467 break;
1468 if (xas.xa_index == 0)
1469 break;
1470 }
1471
1472 return xas.xa_index;
1473}
1474EXPORT_SYMBOL(page_cache_next_miss);
1475
1476/**
1477 * page_cache_prev_miss() - Find the previous gap in the page cache.
1478 * @mapping: Mapping.
1479 * @index: Index.
1480 * @max_scan: Maximum range to search.
1481 *
1482 * Search the range [max(index - max_scan + 1, 0), index] for the
1483 * gap with the highest index.
1484 *
1485 * This function may be called under the rcu_read_lock. However, this will
1486 * not atomically search a snapshot of the cache at a single point in time.
1487 * For example, if a gap is created at index 10, then subsequently a gap is
1488 * created at index 5, page_cache_prev_miss() covering both indices may
1489 * return 5 if called under the rcu_read_lock.
1490 *
1491 * Return: The index of the gap if found, otherwise an index outside the
1492 * range specified (in which case 'index - return >= max_scan' will be true).
1493 * In the rare case of wrap-around, ULONG_MAX will be returned.
1494 */
1495pgoff_t page_cache_prev_miss(struct address_space *mapping,
1496 pgoff_t index, unsigned long max_scan)
1497{
1498 XA_STATE(xas, &mapping->i_pages, index);
1499
1500 while (max_scan--) {
1501 void *entry = xas_prev(&xas);
1502 if (!entry || xa_is_value(entry))
1503 break;
1504 if (xas.xa_index == ULONG_MAX)
1505 break;
1506 }
1507
1508 return xas.xa_index;
1509}
1510EXPORT_SYMBOL(page_cache_prev_miss);
1511
1512/**
1513 * find_get_entry - find and get a page cache entry
1514 * @mapping: the address_space to search
1515 * @offset: the page cache index
1516 *
1517 * Looks up the page cache slot at @mapping & @offset. If there is a
1518 * page cache page, it is returned with an increased refcount.
1519 *
1520 * If the slot holds a shadow entry of a previously evicted page, or a
1521 * swap entry from shmem/tmpfs, it is returned.
1522 *
1523 * Return: the found page or shadow entry, %NULL if nothing is found.
1524 */
1525struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
1526{
1527 XA_STATE(xas, &mapping->i_pages, offset);
1528 struct page *page;
1529
1530 rcu_read_lock();
1531repeat:
1532 xas_reset(&xas);
1533 page = xas_load(&xas);
1534 if (xas_retry(&xas, page))
1535 goto repeat;
1536 /*
1537 * A shadow entry of a recently evicted page, or a swap entry from
1538 * shmem/tmpfs. Return it without attempting to raise page count.
1539 */
1540 if (!page || xa_is_value(page))
1541 goto out;
1542
1543 if (!page_cache_get_speculative(page))
1544 goto repeat;
1545
1546 /*
1547 * Has the page moved or been split?
1548 * This is part of the lockless pagecache protocol. See
1549 * include/linux/pagemap.h for details.
1550 */
1551 if (unlikely(page != xas_reload(&xas))) {
1552 put_page(page);
1553 goto repeat;
1554 }
1555 page = find_subpage(page, offset);
1556out:
1557 rcu_read_unlock();
1558
1559 return page;
1560}
1561EXPORT_SYMBOL(find_get_entry);
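
/*
 * Example (illustrative sketch, not taken from an in-tree caller): the three
 * outcomes a caller of find_get_entry() has to distinguish. "..." stands for
 * caller-specific handling.
 *
 *	struct page *entry = find_get_entry(mapping, index);
 *
 *	if (!entry)
 *		...			(nothing cached at this index)
 *	else if (xa_is_value(entry))
 *		...			(shadow/swap entry, no reference held)
 *	else {
 *		...			(real page, refcount already elevated)
 *		put_page(entry);
 *	}
 */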
1562
1563/**
1564 * find_lock_entry - locate, pin and lock a page cache entry
1565 * @mapping: the address_space to search
1566 * @offset: the page cache index
1567 *
1568 * Looks up the page cache slot at @mapping & @offset. If there is a
1569 * page cache page, it is returned locked and with an increased
1570 * refcount.
1571 *
1572 * If the slot holds a shadow entry of a previously evicted page, or a
1573 * swap entry from shmem/tmpfs, it is returned.
1574 *
1575 * find_lock_entry() may sleep.
1576 *
1577 * Return: the found page or shadow entry, %NULL if nothing is found.
1578 */
1579struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
1580{
1581 struct page *page;
1582
1583repeat:
1584 page = find_get_entry(mapping, offset);
1585 if (page && !xa_is_value(page)) {
1586 lock_page(page);
1587 /* Has the page been truncated? */
1588 if (unlikely(page_mapping(page) != mapping)) {
1589 unlock_page(page);
1590 put_page(page);
1591 goto repeat;
1592 }
1593 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
1594 }
1595 return page;
1596}
1597EXPORT_SYMBOL(find_lock_entry);
1598
1599/**
1600 * pagecache_get_page - find and get a page reference
1601 * @mapping: the address_space to search
1602 * @offset: the page index
1603 * @fgp_flags: FGP flags
1604 * @gfp_mask: gfp mask to use for the page cache data page allocation
1605 *
1606 * Looks up the page cache slot at @mapping & @offset.
1607 *
1608 * FGP flags modify how the page is returned.
1609 *
1610 * @fgp_flags can be:
1611 *
1612 * - FGP_ACCESSED: the page will be marked accessed
1613 * - FGP_LOCK: Page is returned locked
1614 * - FGP_CREAT: If page is not present then a new page is allocated using
1615 * @gfp_mask and added to the page cache and the VM's LRU
1616 * list. The page is returned locked and with an increased
1617 * refcount.
1618 * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
1619 * its own locking dance if the page is already in cache, or unlock the page
1620 * before returning if we had to add the page to pagecache.
1621 *
1622 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1623 * if the GFP flags specified for FGP_CREAT are atomic.
1624 *
1625 * If there is a page cache page, it is returned with an increased refcount.
1626 *
1627 * Return: the found page or %NULL otherwise.
1628 */
1629struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
1630 int fgp_flags, gfp_t gfp_mask)
1631{
1632 struct page *page;
1633
1634repeat:
1635 page = find_get_entry(mapping, offset);
1636 if (xa_is_value(page))
1637 page = NULL;
1638 if (!page)
1639 goto no_page;
1640
1641 if (fgp_flags & FGP_LOCK) {
1642 if (fgp_flags & FGP_NOWAIT) {
1643 if (!trylock_page(page)) {
1644 put_page(page);
1645 return NULL;
1646 }
1647 } else {
1648 lock_page(page);
1649 }
1650
1651 /* Has the page been truncated? */
1652 if (unlikely(compound_head(page)->mapping != mapping)) {
1653 unlock_page(page);
1654 put_page(page);
1655 goto repeat;
1656 }
1657 VM_BUG_ON_PAGE(page->index != offset, page);
1658 }
1659
1660 if (fgp_flags & FGP_ACCESSED)
1661 mark_page_accessed(page);
1662
1663no_page:
1664 if (!page && (fgp_flags & FGP_CREAT)) {
1665 int err;
1666 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
1667 gfp_mask |= __GFP_WRITE;
1668 if (fgp_flags & FGP_NOFS)
1669 gfp_mask &= ~__GFP_FS;
1670
1671 page = __page_cache_alloc(gfp_mask);
1672 if (!page)
1673 return NULL;
1674
1675 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1676 fgp_flags |= FGP_LOCK;
1677
1678 /* Init accessed so that we avoid an atomic mark_page_accessed() later */
1679 if (fgp_flags & FGP_ACCESSED)
1680 __SetPageReferenced(page);
1681
1682 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
1683 if (unlikely(err)) {
1684 put_page(page);
1685 page = NULL;
1686 if (err == -EEXIST)
1687 goto repeat;
1688 }
1689
1690 /*
1691 * add_to_page_cache_lru locks the page, and for mmap we expect
1692 * an unlocked page.
1693 */
1694 if (page && (fgp_flags & FGP_FOR_MMAP))
1695 unlock_page(page);
1696 }
1697
1698 return page;
1699}
1700EXPORT_SYMBOL(pagecache_get_page);
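
/*
 * Example (illustrative sketch, not taken from an in-tree caller): finding or
 * creating a locked page with pagecache_get_page(). "..." stands for whatever
 * filesystem-specific work is done while the page is held locked.
 *
 *	page = pagecache_get_page(mapping, index,
 *				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *				  mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */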
1701
1702/**
1703 * find_get_entries - gang pagecache lookup
1704 * @mapping: The address_space to search
1705 * @start: The starting page cache index
1706 * @nr_entries: The maximum number of entries
1707 * @entries: Where the resulting entries are placed
1708 * @indices: The cache indices corresponding to the entries in @entries
1709 *
1710 * find_get_entries() will search for and return a group of up to
1711 * @nr_entries entries in the mapping. The entries are placed at
1712 * @entries. find_get_entries() takes a reference against any actual
1713 * pages it returns.
1714 *
1715 * The search returns a group of mapping-contiguous page cache entries
1716 * with ascending indexes. There may be holes in the indices due to
1717 * not-present pages.
1718 *
1719 * Any shadow entries of evicted pages, or swap entries from
1720 * shmem/tmpfs, are included in the returned array.
1721 *
1722 * Return: the number of pages and shadow entries which were found.
1723 */
1724unsigned find_get_entries(struct address_space *mapping,
1725 pgoff_t start, unsigned int nr_entries,
1726 struct page **entries, pgoff_t *indices)
1727{
1728 XA_STATE(xas, &mapping->i_pages, start);
1729 struct page *page;
1730 unsigned int ret = 0;
1731
1732 if (!nr_entries)
1733 return 0;
1734
1735 rcu_read_lock();
1736 xas_for_each(&xas, page, ULONG_MAX) {
1737 if (xas_retry(&xas, page))
1738 continue;
1739 /*
1740 * A shadow entry of a recently evicted page, a swap
1741 * entry from shmem/tmpfs or a DAX entry. Return it
1742 * without attempting to raise page count.
1743 */
1744 if (xa_is_value(page))
1745 goto export;
1746
1747 if (!page_cache_get_speculative(page))
1748 goto retry;
1749
1750 /* Has the page moved or been split? */
1751 if (unlikely(page != xas_reload(&xas)))
1752 goto put_page;
1753 page = find_subpage(page, xas.xa_index);
1754
1755export:
1756 indices[ret] = xas.xa_index;
1757 entries[ret] = page;
1758 if (++ret == nr_entries)
1759 break;
1760 continue;
1761put_page:
1762 put_page(page);
1763retry:
1764 xas_reset(&xas);
1765 }
1766 rcu_read_unlock();
1767 return ret;
1768}
1769
1770/**
1771 * find_get_pages_range - gang pagecache lookup
1772 * @mapping: The address_space to search
1773 * @start: The starting page index
1774 * @end: The final page index (inclusive)
1775 * @nr_pages: The maximum number of pages
1776 * @pages: Where the resulting pages are placed
1777 *
1778 * find_get_pages_range() will search for and return a group of up to @nr_pages
1779 * pages in the mapping starting at index @start and up to index @end
1780 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
1781 * a reference against the returned pages.
1782 *
1783 * The search returns a group of mapping-contiguous pages with ascending
1784 * indexes. There may be holes in the indices due to not-present pages.
1785 * We also update @start to index the next page for the traversal.
1786 *
1787 * Return: the number of pages which were found. If this number is
1788 * smaller than @nr_pages, the end of specified range has been
1789 * reached.
1790 */
1791unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1792 pgoff_t end, unsigned int nr_pages,
1793 struct page **pages)
1794{
1795 XA_STATE(xas, &mapping->i_pages, *start);
1796 struct page *page;
1797 unsigned ret = 0;
1798
1799 if (unlikely(!nr_pages))
1800 return 0;
1801
1802 rcu_read_lock();
1803 xas_for_each(&xas, page, end) {
1804 if (xas_retry(&xas, page))
1805 continue;
1806 /* Skip over shadow, swap and DAX entries */
1807 if (xa_is_value(page))
1808 continue;
1809
1810 if (!page_cache_get_speculative(page))
1811 goto retry;
1812
1813 /* Has the page moved or been split? */
1814 if (unlikely(page != xas_reload(&xas)))
1815 goto put_page;
1816
1817 pages[ret] = find_subpage(page, xas.xa_index);
1818 if (++ret == nr_pages) {
1819 *start = xas.xa_index + 1;
1820 goto out;
1821 }
1822 continue;
1823put_page:
1824 put_page(page);
1825retry:
1826 xas_reset(&xas);
1827 }
1828
1829 /*
1830 * We come here when there is no page beyond @end. We take care to not
1831 * overflow the index @start as it confuses some of the callers. This
1832 * breaks the iteration when there is a page at index -1 but that is
1833 * already broken anyway.
1834 */
1835 if (end == (pgoff_t)-1)
1836 *start = (pgoff_t)-1;
1837 else
1838 *start = end + 1;
1839out:
1840 rcu_read_unlock();
1841
1842 return ret;
1843}
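
/*
 * Example (illustrative sketch, not taken from an in-tree caller): batching a
 * range walk with find_get_pages_range(). The batch size of 16 is an
 * arbitrary assumption; "..." stands for caller-specific per-page work.
 *
 *	struct page *pages[16];
 *	pgoff_t index = start;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_range(mapping, &index, end, 16, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			...
 *			put_page(pages[i]);
 *		}
 *	}
 */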
1844
1845/**
1846 * find_get_pages_contig - gang contiguous pagecache lookup
1847 * @mapping: The address_space to search
1848 * @index: The starting page index
1849 * @nr_pages: The maximum number of pages
1850 * @pages: Where the resulting pages are placed
1851 *
1852 * find_get_pages_contig() works exactly like find_get_pages(), except
1853 * that the pages it returns are guaranteed to be contiguous.
1854 *
1855 * Return: the number of pages which were found.
1856 */
1857unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1858 unsigned int nr_pages, struct page **pages)
1859{
1860 XA_STATE(xas, &mapping->i_pages, index);
1861 struct page *page;
1862 unsigned int ret = 0;
1863
1864 if (unlikely(!nr_pages))
1865 return 0;
1866
1867 rcu_read_lock();
1868 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1869 if (xas_retry(&xas, page))
1870 continue;
1871 /*
1872 * If the entry has been swapped out, we can stop looking.
1873 * No current caller is looking for DAX entries.
1874 */
1875 if (xa_is_value(page))
1876 break;
1877
1878 if (!page_cache_get_speculative(page))
1879 goto retry;
1880
1881 /* Has the page moved or been split? */
1882 if (unlikely(page != xas_reload(&xas)))
1883 goto put_page;
1884
1885 pages[ret] = find_subpage(page, xas.xa_index);
1886 if (++ret == nr_pages)
1887 break;
1888 continue;
1889put_page:
1890 put_page(page);
1891retry:
1892 xas_reset(&xas);
1893 }
1894 rcu_read_unlock();
1895 return ret;
1896}
1897EXPORT_SYMBOL(find_get_pages_contig);
1898
1899/**
1900 * find_get_pages_range_tag - find and return pages in given range matching @tag
1901 * @mapping: the address_space to search
1902 * @index: the starting page index
1903 * @end: The final page index (inclusive)
1904 * @tag: the tag index
1905 * @nr_pages: the maximum number of pages
1906 * @pages: where the resulting pages are placed
1907 *
1908 * Like find_get_pages, except we only return pages which are tagged with
1909 * @tag. We update @index to index the next page for the traversal.
1910 *
1911 * Return: the number of pages which were found.
1912 */
1913unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
1914 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
1915 struct page **pages)
1916{
1917 XA_STATE(xas, &mapping->i_pages, *index);
1918 struct page *page;
1919 unsigned ret = 0;
1920
1921 if (unlikely(!nr_pages))
1922 return 0;
1923
1924 rcu_read_lock();
1925 xas_for_each_marked(&xas, page, end, tag) {
1926 if (xas_retry(&xas, page))
1927 continue;
1928 /*
1929 * Shadow entries should never be tagged, but this iteration
1930 * is lockless so there is a window for page reclaim to evict
1931 * a page we saw tagged. Skip over it.
1932 */
1933 if (xa_is_value(page))
1934 continue;
1935
1936 if (!page_cache_get_speculative(page))
1937 goto retry;
1938
1939 /* Has the page moved or been split? */
1940 if (unlikely(page != xas_reload(&xas)))
1941 goto put_page;
1942
1943 pages[ret] = find_subpage(page, xas.xa_index);
1944 if (++ret == nr_pages) {
1945 *index = xas.xa_index + 1;
1946 goto out;
1947 }
1948 continue;
1949put_page:
1950 put_page(page);
1951retry:
1952 xas_reset(&xas);
1953 }
1954
1955 /*
1956 * We come here when we got to @end. We take care to not overflow the
1957 * index @index as it confuses some of the callers. This breaks the
1958 * iteration when there is a page at index -1 but that is already
1959 * broken anyway.
1960 */
1961 if (end == (pgoff_t)-1)
1962 *index = (pgoff_t)-1;
1963 else
1964 *index = end + 1;
1965out:
1966 rcu_read_unlock();
1967
1968 return ret;
1969}
1970EXPORT_SYMBOL(find_get_pages_range_tag);
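
/*
 * Example (illustrative sketch, not taken from an in-tree caller): a
 * simplified writeback-style scan over dirty pages using
 * find_get_pages_range_tag(). The batch size of 16 is an arbitrary
 * assumption; "..." stands for locking and writing back each page.
 *
 *	struct page *pages[16];
 *	pgoff_t index = 0;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages_range_tag(mapping, &index, (pgoff_t)-1,
 *					      PAGECACHE_TAG_DIRTY, 16, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			...
 *			put_page(pages[i]);
 *		}
 *	}
 */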
1971
1972/*
1973 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1974 * a _large_ part of the i/o request. Imagine the worst scenario:
1975 *
1976 * ---R__________________________________________B__________
1977 *    ^ reading here                              ^ bad block (assume 4k)
1978 *
1979 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1980 * => failing the whole request => read(R) => read(R+1) =>
1981 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1982 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1983 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1984 *
1985 * It is going insane. Fix it by quickly scaling down the readahead size.
1986 */
1987static void shrink_readahead_size_eio(struct file *filp,
1988 struct file_ra_state *ra)
1989{
1990 ra->ra_pages /= 4;
1991}
1992
1993/**
1994 * generic_file_buffered_read - generic file read routine
1995 * @iocb: the iocb to read
1996 * @iter: data destination
1997 * @written: already copied
1998 *
1999 * This is a generic file read routine, and uses the
2000 * mapping->a_ops->readpage() function for the actual low-level stuff.
2001 *
2002 * This is really ugly. But the gotos actually try to clarify some
2003 * of the logic when it comes to error handling etc.
2004 *
2005 * Return:
2006 * * total number of bytes copied, including those that were already @written
2007 * * negative error code if nothing was copied
2008 */
2009static ssize_t generic_file_buffered_read(struct kiocb *iocb,
2010 struct iov_iter *iter, ssize_t written)
2011{
2012 struct file *filp = iocb->ki_filp;
2013 struct address_space *mapping = filp->f_mapping;
2014 struct inode *inode = mapping->host;
2015 struct file_ra_state *ra = &filp->f_ra;
2016 loff_t *ppos = &iocb->ki_pos;
2017 pgoff_t index;
2018 pgoff_t last_index;
2019 pgoff_t prev_index;
2020 unsigned long offset; /* offset into pagecache page */
2021 unsigned int prev_offset;
2022 int error = 0;
2023
2024 if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
2025 return 0;
2026 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2027
2028 index = *ppos >> PAGE_SHIFT;
2029 prev_index = ra->prev_pos >> PAGE_SHIFT;
2030 prev_offset = ra->prev_pos & (PAGE_SIZE-1);
2031 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
2032 offset = *ppos & ~PAGE_MASK;
2033
2034 for (;;) {
2035 struct page *page;
2036 pgoff_t end_index;
2037 loff_t isize;
2038 unsigned long nr, ret;
2039
2040 cond_resched();
2041find_page:
2042 if (fatal_signal_pending(current)) {
2043 error = -EINTR;
2044 goto out;
2045 }
2046
2047 page = find_get_page(mapping, index);
2048 if (!page) {
2049 if (iocb->ki_flags & IOCB_NOWAIT)
2050 goto would_block;
2051 page_cache_sync_readahead(mapping,
2052 ra, filp,
2053 index, last_index - index);
2054 page = find_get_page(mapping, index);
2055 if (unlikely(page == NULL))
2056 goto no_cached_page;
2057 }
2058 if (PageReadahead(page)) {
2059 page_cache_async_readahead(mapping,
2060 ra, filp, page,
2061 index, last_index - index);
2062 }
2063 if (!PageUptodate(page)) {
2064 if (iocb->ki_flags & IOCB_NOWAIT) {
2065 put_page(page);
2066 goto would_block;
2067 }
2068
2069 /*
2070 * See comment in do_read_cache_page on why
2071 * wait_on_page_locked is used to avoid unnecessary
2072 * serialisations and why it's safe.
2073 */
2074 error = wait_on_page_locked_killable(page);
2075 if (unlikely(error))
2076 goto readpage_error;
2077 if (PageUptodate(page))
2078 goto page_ok;
2079
2080 if (inode->i_blkbits == PAGE_SHIFT ||
2081 !mapping->a_ops->is_partially_uptodate)
2082 goto page_not_up_to_date;
2083 /* pipes can't handle partially uptodate pages */
2084 if (unlikely(iov_iter_is_pipe(iter)))
2085 goto page_not_up_to_date;
2086 if (!trylock_page(page))
2087 goto page_not_up_to_date;
2088 /* Did it get truncated before we got the lock? */
2089 if (!page->mapping)
2090 goto page_not_up_to_date_locked;
2091 if (!mapping->a_ops->is_partially_uptodate(page,
2092 offset, iter->count))
2093 goto page_not_up_to_date_locked;
2094 unlock_page(page);
2095 }
2096page_ok:
2097 /*
2098 * i_size must be checked after we know the page is Uptodate.
2099 *
2100 * Checking i_size after the Uptodate check allows us to calculate
2101 * the correct value for "nr", which means the zero-filled
2102 * part of the page is not copied back to userspace (unless
2103 * another truncate extends the file - this is desired though).
2104 */
2105
2106 isize = i_size_read(inode);
2107 end_index = (isize - 1) >> PAGE_SHIFT;
2108 if (unlikely(!isize || index > end_index)) {
2109 put_page(page);
2110 goto out;
2111 }
2112
2113 /* nr is the maximum number of bytes to copy from this page */
2114 nr = PAGE_SIZE;
2115 if (index == end_index) {
2116 nr = ((isize - 1) & ~PAGE_MASK) + 1;
2117 if (nr <= offset) {
2118 put_page(page);
2119 goto out;
2120 }
2121 }
2122 nr = nr - offset;
2123
2124 /* If users can be writing to this page using arbitrary
2125 * virtual addresses, take care about potential aliasing
2126 * before reading the page on the kernel side.
2127 */
2128 if (mapping_writably_mapped(mapping))
2129 flush_dcache_page(page);
2130
2131 /*
2132 * When a sequential read accesses a page several times,
2133 * only mark it as accessed the first time.
2134 */
2135 if (prev_index != index || offset != prev_offset)
2136 mark_page_accessed(page);
2137 prev_index = index;
2138
2139 /*
2140 * Ok, we have the page, and it's up-to-date, so
2141 * now we can copy it to user space...
2142 */
2143
2144 ret = copy_page_to_iter(page, offset, nr, iter);
2145 offset += ret;
2146 index += offset >> PAGE_SHIFT;
2147 offset &= ~PAGE_MASK;
2148 prev_offset = offset;
2149
2150 put_page(page);
2151 written += ret;
2152 if (!iov_iter_count(iter))
2153 goto out;
2154 if (ret < nr) {
2155 error = -EFAULT;
2156 goto out;
2157 }
2158 continue;
2159
2160page_not_up_to_date:
2161 /* Get exclusive access to the page ... */
2162 error = lock_page_killable(page);
2163 if (unlikely(error))
2164 goto readpage_error;
2165
2166page_not_up_to_date_locked:
2167 /* Did it get truncated before we got the lock? */
2168 if (!page->mapping) {
2169 unlock_page(page);
2170 put_page(page);
2171 continue;
2172 }
2173
2174 /* Did somebody else fill it already? */
2175 if (PageUptodate(page)) {
2176 unlock_page(page);
2177 goto page_ok;
2178 }
2179
2180readpage:
2181 /*
2182 * A previous I/O error may have been due to temporary
2183 * failures, e.g. multipath errors.
2184 * PG_error will be set again if readpage fails.
2185 */
2186 ClearPageError(page);
2187 /* Start the actual read. The read will unlock the page. */
2188 error = mapping->a_ops->readpage(filp, page);
2189
2190 if (unlikely(error)) {
2191 if (error == AOP_TRUNCATED_PAGE) {
2192 put_page(page);
2193 error = 0;
2194 goto find_page;
2195 }
2196 goto readpage_error;
2197 }
2198
2199 if (!PageUptodate(page)) {
2200 error = lock_page_killable(page);
2201 if (unlikely(error))
2202 goto readpage_error;
2203 if (!PageUptodate(page)) {
2204 if (page->mapping == NULL) {
2205 /*
2206 * invalidate_mapping_pages got it
2207 */
2208 unlock_page(page);
2209 put_page(page);
2210 goto find_page;
2211 }
2212 unlock_page(page);
2213 shrink_readahead_size_eio(filp, ra);
2214 error = -EIO;
2215 goto readpage_error;
2216 }
2217 unlock_page(page);
2218 }
2219
2220 goto page_ok;
2221
2222readpage_error:
2223 /* UHHUH! A synchronous read error occurred. Report it */
2224 put_page(page);
2225 goto out;
2226
2227no_cached_page:
2228 /*
2229 * Ok, it wasn't cached, so we need to create a new
2230 * page..
2231 */
2232 page = page_cache_alloc(mapping);
2233 if (!page) {
2234 error = -ENOMEM;
2235 goto out;
2236 }
2237 error = add_to_page_cache_lru(page, mapping, index,
2238 mapping_gfp_constraint(mapping, GFP_KERNEL));
2239 if (error) {
2240 put_page(page);
2241 if (error == -EEXIST) {
2242 error = 0;
2243 goto find_page;
2244 }
2245 goto out;
2246 }
2247 goto readpage;
2248 }
2249
2250would_block:
2251 error = -EAGAIN;
2252out:
2253 ra->prev_pos = prev_index;
2254 ra->prev_pos <<= PAGE_SHIFT;
2255 ra->prev_pos |= prev_offset;
2256
2257 *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
2258 file_accessed(filp);
2259 return written ? written : error;
2260}
2261
2262/**
2263 * generic_file_read_iter - generic filesystem read routine
2264 * @iocb: kernel I/O control block
2265 * @iter: destination for the data read
2266 *
2267 * This is the "read_iter()" routine for all filesystems
2268 * that can use the page cache directly.
2269 * Return:
2270 * * number of bytes copied, even for partial reads
2271 * * negative error code if nothing was read
2272 */
2273ssize_t
2274generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2275{
2276 size_t count = iov_iter_count(iter);
2277 ssize_t retval = 0;
2278
2279 if (!count)
2280 goto out; /* skip atime */
2281
2282 if (iocb->ki_flags & IOCB_DIRECT) {
2283 struct file *file = iocb->ki_filp;
2284 struct address_space *mapping = file->f_mapping;
2285 struct inode *inode = mapping->host;
2286 loff_t size;
2287
2288 size = i_size_read(inode);
2289 if (iocb->ki_flags & IOCB_NOWAIT) {
2290 if (filemap_range_has_page(mapping, iocb->ki_pos,
2291 iocb->ki_pos + count - 1))
2292 return -EAGAIN;
2293 } else {
2294 retval = filemap_write_and_wait_range(mapping,
2295 iocb->ki_pos,
2296 iocb->ki_pos + count - 1);
2297 if (retval < 0)
2298 goto out;
2299 }
2300
2301 file_accessed(file);
2302
2303 retval = mapping->a_ops->direct_IO(iocb, iter);
2304 if (retval >= 0) {
2305 iocb->ki_pos += retval;
2306 count -= retval;
2307 }
2308 iov_iter_revert(iter, count - iov_iter_count(iter));
2309
2310 /*
2311 * Btrfs can have a short DIO read if we encounter
2312 * compressed extents, so if there was an error, or if
2313 * we've already read everything we wanted to, or if
2314 * there was a short read because we hit EOF, go ahead
2315 * and return. Otherwise fall through to buffered IO for
2316 * the rest of the read. Buffered reads will not work for
2317 * DAX files, so don't bother trying.
2318 */
2319 if (retval < 0 || !count || iocb->ki_pos >= size ||
2320 IS_DAX(inode))
2321 goto out;
2322 }
2323
2324 retval = generic_file_buffered_read(iocb, iter, retval);
2325out:
2326 return retval;
2327}
2328EXPORT_SYMBOL(generic_file_read_iter);
2329
2330#ifdef CONFIG_MMU
2331#define MMAP_LOTSAMISS (100)
2332static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
2333 struct file *fpin)
2334{
2335 int flags = vmf->flags;
2336
2337 if (fpin)
2338 return fpin;
2339
2340 /*
2341 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
2342 * anything, so we only pin the file and drop the mmap_sem if only
2343 * FAULT_FLAG_ALLOW_RETRY is set.
2344 */
2345 if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
2346 FAULT_FLAG_ALLOW_RETRY) {
2347 fpin = get_file(vmf->vma->vm_file);
2348 up_read(&vmf->vma->vm_mm->mmap_sem);
2349 }
2350 return fpin;
2351}
2352
2353/*
2354 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
2355 * @vmf - the vm_fault for this fault.
2356 * @page - the page to lock.
2357 * @fpin - the pointer to the file we may pin (or is already pinned).
2358 *
2359 * This works similarly to lock_page_or_retry() in that it can drop the mmap_sem.
2360 * It differs in that it actually returns the page locked if it returns 1, and 0
2361 * if it couldn't lock the page. If we did have to drop the mmap_sem then fpin
2362 * will point to the pinned file and needs to be fput()'ed at a later point.
2363 */
2364static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
2365 struct file **fpin)
2366{
2367 if (trylock_page(page))
2368 return 1;
2369
2370 /*
2371 * NOTE! This will make us return with VM_FAULT_RETRY, but with
2372 * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
2373 * is supposed to work. We have way too many special cases..
2374 */
2375 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2376 return 0;
2377
2378 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2379 if (vmf->flags & FAULT_FLAG_KILLABLE) {
2380 if (__lock_page_killable(page)) {
2381 /*
2382 * We didn't have the right flags to drop the mmap_sem,
2383 * but all fault_handlers only check for fatal signals
2384 * if we return VM_FAULT_RETRY, so we need to drop the
2385 * mmap_sem here and return 0 if we don't have a fpin.
2386 */
2387 if (*fpin == NULL)
2388 up_read(&vmf->vma->vm_mm->mmap_sem);
2389 return 0;
2390 }
2391 } else
2392 __lock_page(page);
2393 return 1;
2394}
2395
2396
2397/*
2398 * Synchronous readahead happens when we don't even find a page in the page
2399 * cache at all. We don't want to perform IO under the mmap_sem, so if we have
2400 * to drop the mmap_sem we return the file that was pinned in order to do
2401 * that. If we didn't pin a file then we return NULL. The file that is
2402 * returned needs to be fput()'ed when we're done with it.
2403 */
2404static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
2405{
2406 struct file *file = vmf->vma->vm_file;
2407 struct file_ra_state *ra = &file->f_ra;
2408 struct address_space *mapping = file->f_mapping;
2409 struct file *fpin = NULL;
2410 pgoff_t offset = vmf->pgoff;
2411
2412 /* If we don't want any read-ahead, don't bother */
2413 if (vmf->vma->vm_flags & VM_RAND_READ)
2414 return fpin;
2415 if (!ra->ra_pages)
2416 return fpin;
2417
2418 if (vmf->vma->vm_flags & VM_SEQ_READ) {
2419 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2420 page_cache_sync_readahead(mapping, ra, file, offset,
2421 ra->ra_pages);
2422 return fpin;
2423 }
2424
2425 /* Avoid banging the cache line if not needed */
2426 if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
2427 ra->mmap_miss++;
2428
2429 /*
2430 * Do we miss much more than hit in this file? If so,
2431 * stop bothering with read-ahead. It will only hurt.
2432 */
2433 if (ra->mmap_miss > MMAP_LOTSAMISS)
2434 return fpin;
2435
2436 /*
2437 * mmap read-around
2438 */
2439 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2440 ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
2441 ra->size = ra->ra_pages;
2442 ra->async_size = ra->ra_pages / 4;
2443 ra_submit(ra, mapping, file);
2444 return fpin;
2445}
2446
2447/*
2448 * Asynchronous readahead happens when we find the page cached with PG_readahead set,
2449 * so we want to possibly extend the readahead further. We return the file that
2450 * was pinned if we have to drop the mmap_sem in order to do IO.
2451 */
2452static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
2453 struct page *page)
2454{
2455 struct file *file = vmf->vma->vm_file;
2456 struct file_ra_state *ra = &file->f_ra;
2457 struct address_space *mapping = file->f_mapping;
2458 struct file *fpin = NULL;
2459 pgoff_t offset = vmf->pgoff;
2460
2461 /* If we don't want any read-ahead, don't bother */
2462 if (vmf->vma->vm_flags & VM_RAND_READ)
2463 return fpin;
2464 if (ra->mmap_miss > 0)
2465 ra->mmap_miss--;
2466 if (PageReadahead(page)) {
2467 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2468 page_cache_async_readahead(mapping, ra, file,
2469 page, offset, ra->ra_pages);
2470 }
2471 return fpin;
2472}
2473
2474/**
2475 * filemap_fault - read in file data for page fault handling
2476 * @vmf: struct vm_fault containing details of the fault
2477 *
2478 * filemap_fault() is invoked via the vma operations vector for a
2479 * mapped memory region to read in file data during a page fault.
2480 *
2481 * The gotos are kind of ugly, but this streamlines the normal case of having
2482 * it in the page cache, and handles the special cases reasonably without
2483 * having a lot of duplicated code.
2484 *
2485 * vma->vm_mm->mmap_sem must be held on entry.
2486 *
2487 * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
2488 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
2489 *
2490 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
2491 * has not been released.
2492 *
2493 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
2494 *
2495 * Return: bitwise-OR of %VM_FAULT_ codes.
2496 */
2497vm_fault_t filemap_fault(struct vm_fault *vmf)
2498{
2499 int error;
2500 struct file *file = vmf->vma->vm_file;
2501 struct file *fpin = NULL;
2502 struct address_space *mapping = file->f_mapping;
2503 struct file_ra_state *ra = &file->f_ra;
2504 struct inode *inode = mapping->host;
2505 pgoff_t offset = vmf->pgoff;
2506 pgoff_t max_off;
2507 struct page *page;
2508 vm_fault_t ret = 0;
2509
2510 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2511 if (unlikely(offset >= max_off))
2512 return VM_FAULT_SIGBUS;
2513
2514 /*
2515 * Do we have something in the page cache already?
2516 */
2517 page = find_get_page(mapping, offset);
2518 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
2519 /*
2520 * We found the page, so try async readahead before
2521 * waiting for the lock.
2522 */
2523 fpin = do_async_mmap_readahead(vmf, page);
2524 } else if (!page) {
2525 /* No page in the page cache at all */
2526 count_vm_event(PGMAJFAULT);
2527 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
2528 ret = VM_FAULT_MAJOR;
2529 fpin = do_sync_mmap_readahead(vmf);
2530retry_find:
2531 page = pagecache_get_page(mapping, offset,
2532 FGP_CREAT|FGP_FOR_MMAP,
2533 vmf->gfp_mask);
2534 if (!page) {
2535 if (fpin)
2536 goto out_retry;
2537 return vmf_error(-ENOMEM);
2538 }
2539 }
2540
2541 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
2542 goto out_retry;
2543
2544 /* Did it get truncated? */
2545 if (unlikely(compound_head(page)->mapping != mapping)) {
2546 unlock_page(page);
2547 put_page(page);
2548 goto retry_find;
2549 }
2550 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
2551
2552 /*
2553 * We have a locked page in the page cache, now we need to check
2554 * that it's up-to-date. If not, it is going to be due to an error.
2555 */
2556 if (unlikely(!PageUptodate(page)))
2557 goto page_not_uptodate;
2558
2559 /*
2560 * We've made it this far and we had to drop our mmap_sem, now is the
2561 * time to return to the upper layer and have it re-find the vma and
2562 * redo the fault.
2563 */
2564 if (fpin) {
2565 unlock_page(page);
2566 goto out_retry;
2567 }
2568
2569 /*
2570 * Found the page and have a reference on it.
2571 * We must recheck i_size under page lock.
2572 */
2573 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2574 if (unlikely(offset >= max_off)) {
2575 unlock_page(page);
2576 put_page(page);
2577 return VM_FAULT_SIGBUS;
2578 }
2579
2580 vmf->page = page;
2581 return ret | VM_FAULT_LOCKED;
2582
2583page_not_uptodate:
2584 /*
2585 * Umm, take care of errors if the page isn't up-to-date.
2586 * Try to re-read it _once_. We do this synchronously,
2587 * because there really aren't any performance issues here
2588 * and we need to check for errors.
2589 */
2590 ClearPageError(page);
2591 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2592 error = mapping->a_ops->readpage(file, page);
2593 if (!error) {
2594 wait_on_page_locked(page);
2595 if (!PageUptodate(page))
2596 error = -EIO;
2597 }
2598 if (fpin)
2599 goto out_retry;
2600 put_page(page);
2601
2602 if (!error || error == AOP_TRUNCATED_PAGE)
2603 goto retry_find;
2604
2605 /* Things didn't work out. Return VM_FAULT_SIGBUS to tell the mm layer so. */
2606 shrink_readahead_size_eio(file, ra);
2607 return VM_FAULT_SIGBUS;
2608
2609out_retry:
2610 /*
2611 * We dropped the mmap_sem, we need to return to the fault handler to
2612 * re-find the vma and come back and find our hopefully still populated
2613 * page.
2614 */
2615 if (page)
2616 put_page(page);
2617 if (fpin)
2618 fput(fpin);
2619 return ret | VM_FAULT_RETRY;
2620}
2621EXPORT_SYMBOL(filemap_fault);
2622
2623void filemap_map_pages(struct vm_fault *vmf,
2624 pgoff_t start_pgoff, pgoff_t end_pgoff)
2625{
2626 struct file *file = vmf->vma->vm_file;
2627 struct address_space *mapping = file->f_mapping;
2628 pgoff_t last_pgoff = start_pgoff;
2629 unsigned long max_idx;
2630 XA_STATE(xas, &mapping->i_pages, start_pgoff);
2631 struct page *page;
2632
2633 rcu_read_lock();
2634 xas_for_each(&xas, page, end_pgoff) {
2635 if (xas_retry(&xas, page))
2636 continue;
2637 if (xa_is_value(page))
2638 goto next;
2639
2640 /*
2641 * Check for a locked page first, as a speculative
2642 * reference may adversely influence page migration.
2643 */
2644 if (PageLocked(page))
2645 goto next;
2646 if (!page_cache_get_speculative(page))
2647 goto next;
2648
2649 /* Has the page moved or been split? */
2650 if (unlikely(page != xas_reload(&xas)))
2651 goto skip;
2652 page = find_subpage(page, xas.xa_index);
2653
2654 if (!PageUptodate(page) ||
2655 PageReadahead(page) ||
2656 PageHWPoison(page))
2657 goto skip;
2658 if (!trylock_page(page))
2659 goto skip;
2660
2661 if (page->mapping != mapping || !PageUptodate(page))
2662 goto unlock;
2663
2664 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2665 if (page->index >= max_idx)
2666 goto unlock;
2667
2668 if (file->f_ra.mmap_miss > 0)
2669 file->f_ra.mmap_miss--;
2670
2671 vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
2672 if (vmf->pte)
2673 vmf->pte += xas.xa_index - last_pgoff;
2674 last_pgoff = xas.xa_index;
2675 if (alloc_set_pte(vmf, NULL, page))
2676 goto unlock;
2677 unlock_page(page);
2678 goto next;
2679unlock:
2680 unlock_page(page);
2681skip:
2682 put_page(page);
2683next:
2684 /* Huge page is mapped? No need to proceed. */
2685 if (pmd_trans_huge(*vmf->pmd))
2686 break;
2687 }
2688 rcu_read_unlock();
2689}
2690EXPORT_SYMBOL(filemap_map_pages);
2691
2692vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
2693{
2694 struct page *page = vmf->page;
2695 struct inode *inode = file_inode(vmf->vma->vm_file);
2696 vm_fault_t ret = VM_FAULT_LOCKED;
2697
2698 sb_start_pagefault(inode->i_sb);
2699 file_update_time(vmf->vma->vm_file);
2700 lock_page(page);
2701 if (page->mapping != inode->i_mapping) {
2702 unlock_page(page);
2703 ret = VM_FAULT_NOPAGE;
2704 goto out;
2705 }
2706 /*
2707 * We mark the page dirty already here so that when freeze is in
2708 * progress, we are guaranteed that writeback during freezing will
2709 * see the dirty page and writeprotect it again.
2710 */
2711 set_page_dirty(page);
2712 wait_for_stable_page(page);
2713out:
2714 sb_end_pagefault(inode->i_sb);
2715 return ret;
2716}
2717
2718const struct vm_operations_struct generic_file_vm_ops = {
2719 .fault = filemap_fault,
2720 .map_pages = filemap_map_pages,
2721 .page_mkwrite = filemap_page_mkwrite,
2722};
2723
2724/* This is used for a general mmap of a disk file */
2725
2726int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2727{
2728 struct address_space *mapping = file->f_mapping;
2729
2730 if (!mapping->a_ops->readpage)
2731 return -ENOEXEC;
2732 file_accessed(file);
2733 vma->vm_ops = &generic_file_vm_ops;
2734 return 0;
2735}
2736
2737/*
2738 * This is for filesystems which do not implement ->writepage.
2739 */
2740int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
2741{
2742 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2743 return -EINVAL;
2744 return generic_file_mmap(file, vma);
2745}
2746#else
2747vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
2748{
2749 return VM_FAULT_SIGBUS;
2750}
2751int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2752{
2753 return -ENOSYS;
2754}
2755int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
2756{
2757 return -ENOSYS;
2758}
2759#endif /* CONFIG_MMU */
2760
2761EXPORT_SYMBOL(filemap_page_mkwrite);
2762EXPORT_SYMBOL(generic_file_mmap);
2763EXPORT_SYMBOL(generic_file_readonly_mmap);
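
/*
 * Example (illustrative sketch, foo_ro_file_operations is a hypothetical
 * name): a read-only filesystem without ->writepage wiring the generic
 * helpers above into its file_operations.
 *
 *	const struct file_operations foo_ro_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_readonly_mmap,
 *	};
 */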
2764
2765static struct page *wait_on_page_read(struct page *page)
2766{
2767 if (!IS_ERR(page)) {
2768 wait_on_page_locked(page);
2769 if (!PageUptodate(page)) {
2770 put_page(page);
2771 page = ERR_PTR(-EIO);
2772 }
2773 }
2774 return page;
2775}
2776
2777static struct page *do_read_cache_page(struct address_space *mapping,
2778 pgoff_t index,
2779 int (*filler)(void *, struct page *),
2780 void *data,
2781 gfp_t gfp)
2782{
2783 struct page *page;
2784 int err;
2785repeat:
2786 page = find_get_page(mapping, index);
2787 if (!page) {
2788 page = __page_cache_alloc(gfp);
2789 if (!page)
2790 return ERR_PTR(-ENOMEM);
2791 err = add_to_page_cache_lru(page, mapping, index, gfp);
2792 if (unlikely(err)) {
2793 put_page(page);
2794 if (err == -EEXIST)
2795 goto repeat;
2796 /* Presumably ENOMEM for xarray node */
2797 return ERR_PTR(err);
2798 }
2799
2800filler:
2801 if (filler)
2802 err = filler(data, page);
2803 else
2804 err = mapping->a_ops->readpage(data, page);
2805
2806 if (err < 0) {
2807 put_page(page);
2808 return ERR_PTR(err);
2809 }
2810
2811 page = wait_on_page_read(page);
2812 if (IS_ERR(page))
2813 return page;
2814 goto out;
2815 }
2816 if (PageUptodate(page))
2817 goto out;
2818
2819 /*
2820 * Page is not up to date and may be locked due to one of the following:
2821 * case a: Page is being filled and the page lock is held
2822 * case b: Read/write error clearing the page uptodate status
2823 * case c: Truncation in progress (page locked)
2824 * case d: Reclaim in progress
2825 *
2826 * Case a, the page will be up to date when the page is unlocked.
2827 * There is no need to serialise on the page lock here as the page
2828 * is pinned so the lock gives no additional protection. Even if
2829 * the page is truncated, the data is still valid if PageUptodate as
2830 * it's a read vs truncate race.
2831 * Case b, the page will not be up to date
2832 * Case c, the page may be truncated but in itself, the data may still
2833 * be valid after IO completes as it's a read vs truncate race. The
2834 * operation must restart if the page is not uptodate on unlock but
2835 * otherwise serialising on page lock to stabilise the mapping gives
2836 * no additional guarantees to the caller as the page lock is
2837 * released before return.
2838 * Case d, similar to truncation. If reclaim holds the page lock, it
2839 * will be a race with remove_mapping that determines if the mapping
2840 * is valid on unlock but otherwise the data is valid and there is
2841 * no need to serialise with page lock.
2842 *
2843 * As the page lock gives no additional guarantee, we optimistically
2844 * wait on the page to be unlocked and check if it's up to date and
2845 * use the page if it is. Otherwise, the page lock is required to
2846 * distinguish between the different cases. The motivation is that we
2847 * avoid spurious serialisations and wakeups when multiple processes
2848 * wait on the same page for IO to complete.
2849 */
2850 wait_on_page_locked(page);
2851 if (PageUptodate(page))
2852 goto out;
2853
2854 /* Distinguish between all the cases under the safety of the lock */
2855 lock_page(page);
2856
2857 /* Case c or d, restart the operation */
2858 if (!page->mapping) {
2859 unlock_page(page);
2860 put_page(page);
2861 goto repeat;
2862 }
2863
2864 /* Someone else locked and filled the page in a very small window */
2865 if (PageUptodate(page)) {
2866 unlock_page(page);
2867 goto out;
2868 }
2869 goto filler;
2870
2871out:
2872 mark_page_accessed(page);
2873 return page;
2874}
2875
2876/**
2877 * read_cache_page - read into page cache, fill it if needed
2878 * @mapping: the page's address_space
2879 * @index: the page index
2880 * @filler: function to perform the read
2881 * @data: first arg to filler(data, page) function, often left as NULL
2882 *
2883 * Read into the page cache. If a page already exists, and PageUptodate() is
2884 * not set, try to fill the page and wait for it to become unlocked.
2885 *
2886 * If the page does not get brought uptodate, return -EIO.
2887 *
2888 * Return: up to date page on success, ERR_PTR() on failure.
2889 */
2890struct page *read_cache_page(struct address_space *mapping,
2891 pgoff_t index,
2892 int (*filler)(void *, struct page *),
2893 void *data)
2894{
2895 return do_read_cache_page(mapping, index, filler, data,
2896 mapping_gfp_mask(mapping));
2897}
2898EXPORT_SYMBOL(read_cache_page);
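
/*
 * Example (illustrative sketch, not taken from an in-tree caller): reading a
 * single page via the read_mapping_page() wrapper, which calls
 * read_cache_page() with a NULL filler so that ->readpage() is used.
 *
 *	struct page *page = read_mapping_page(mapping, index, file);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...				(page is Uptodate but not locked)
 *	put_page(page);
 */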
2899
2900/**
2901 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2902 * @mapping: the page's address_space
2903 * @index: the page index
2904 * @gfp: the page allocator flags to use if allocating
2905 *
2906 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
2907 * any new page allocations done using the specified allocation flags.
2908 *
2909 * If the page does not get brought uptodate, return -EIO.
2910 *
2911 * Return: up to date page on success, ERR_PTR() on failure.
2912 */
2913struct page *read_cache_page_gfp(struct address_space *mapping,
2914 pgoff_t index,
2915 gfp_t gfp)
2916{
2917 return do_read_cache_page(mapping, index, NULL, NULL, gfp);
2918}
2919EXPORT_SYMBOL(read_cache_page_gfp);
2920
2921/*
2922 * Don't operate on ranges the page cache doesn't support, and don't exceed the
2923 * LFS limits. If pos is under the limit it becomes a short access. If it
2924 * exceeds the limit we return -EFBIG.
2925 */
2926static int generic_write_check_limits(struct file *file, loff_t pos,
2927 loff_t *count)
2928{
2929 struct inode *inode = file->f_mapping->host;
2930 loff_t max_size = inode->i_sb->s_maxbytes;
2931 loff_t limit = rlimit(RLIMIT_FSIZE);
2932
2933 if (limit != RLIM_INFINITY) {
2934 if (pos >= limit) {
2935 send_sig(SIGXFSZ, current, 0);
2936 return -EFBIG;
2937 }
2938 *count = min(*count, limit - pos);
2939 }
2940
2941 if (!(file->f_flags & O_LARGEFILE))
2942 max_size = MAX_NON_LFS;
2943
2944 if (unlikely(pos >= max_size))
2945 return -EFBIG;
2946
2947 *count = min(*count, max_size - pos);
2948
2949 return 0;
2950}
2951
2952/*
2953 * Performs necessary checks before doing a write
2954 *
2955 * Can adjust writing position or amount of bytes to write.
2956 * Returns appropriate error code that caller should return or
2957 * zero in case that write should be allowed.
2958 */
2959inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
2960{
2961 struct file *file = iocb->ki_filp;
2962 struct inode *inode = file->f_mapping->host;
2963 loff_t count;
2964 int ret;
2965
2966 if (IS_SWAPFILE(inode))
2967 return -ETXTBSY;
2968
2969 if (!iov_iter_count(from))
2970 return 0;
2971
2972 /* FIXME: this is for backwards compatibility with 2.4 */
2973 if (iocb->ki_flags & IOCB_APPEND)
2974 iocb->ki_pos = i_size_read(inode);
2975
2976 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
2977 return -EINVAL;
2978
2979 count = iov_iter_count(from);
2980 ret = generic_write_check_limits(file, iocb->ki_pos, &count);
2981 if (ret)
2982 return ret;
2983
2984 iov_iter_truncate(from, count);
2985 return iov_iter_count(from);
2986}
2987EXPORT_SYMBOL(generic_write_checks);
2988
2989/*
2990 * Performs necessary checks before doing a clone.
2991 *
2992 * Can adjust amount of bytes to clone via @req_count argument.
2993 * Returns appropriate error code that caller should return or
2994 * zero in case the clone should be allowed.
2995 */
2996int generic_remap_checks(struct file *file_in, loff_t pos_in,
2997 struct file *file_out, loff_t pos_out,
2998 loff_t *req_count, unsigned int remap_flags)
2999{
3000 struct inode *inode_in = file_in->f_mapping->host;
3001 struct inode *inode_out = file_out->f_mapping->host;
3002 uint64_t count = *req_count;
3003 uint64_t bcount;
3004 loff_t size_in, size_out;
3005 loff_t bs = inode_out->i_sb->s_blocksize;
3006 int ret;
3007
3008 /* The start of both ranges must be aligned to an fs block. */
3009 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
3010 return -EINVAL;
3011
3012 /* Ensure offsets don't wrap. */
3013 if (pos_in + count < pos_in || pos_out + count < pos_out)
3014 return -EINVAL;
3015
3016 size_in = i_size_read(inode_in);
3017 size_out = i_size_read(inode_out);
3018
3019 /* Dedupe requires both ranges to be within EOF. */
3020 if ((remap_flags & REMAP_FILE_DEDUP) &&
3021 (pos_in >= size_in || pos_in + count > size_in ||
3022 pos_out >= size_out || pos_out + count > size_out))
3023 return -EINVAL;
3024
3025 /* Ensure the infile range is within the infile. */
3026 if (pos_in >= size_in)
3027 return -EINVAL;
3028 count = min(count, size_in - (uint64_t)pos_in);
3029
3030 ret = generic_write_check_limits(file_out, pos_out, &count);
3031 if (ret)
3032 return ret;
3033
3034 /*
3035 * If the user wanted us to link to the infile's EOF, round up to the
3036 * next block boundary for this check.
3037 *
3038 * Otherwise, make sure the count is also block-aligned, having
3039 * already confirmed the starting offsets' block alignment.
3040 */
3041 if (pos_in + count == size_in) {
3042 bcount = ALIGN(size_in, bs) - pos_in;
3043 } else {
3044 if (!IS_ALIGNED(count, bs))
3045 count = ALIGN_DOWN(count, bs);
3046 bcount = count;
3047 }
3048
3049 /* Don't allow overlapped cloning within the same file. */
3050 if (inode_in == inode_out &&
3051 pos_out + bcount > pos_in &&
3052 pos_out < pos_in + bcount)
3053 return -EINVAL;
3054
3055 /*
3056 * We shortened the request but the caller can't deal with that, so
3057 * bounce the request back to userspace.
3058 */
3059 if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
3060 return -EINVAL;
3061
3062 *req_count = count;
3063 return 0;
3064}
3065
3066
3067/*
3068 * Performs common checks before doing a file copy/clone
3069 * from @file_in to @file_out.
3070 */
3071int generic_file_rw_checks(struct file *file_in, struct file *file_out)
3072{
3073 struct inode *inode_in = file_inode(file_in);
3074 struct inode *inode_out = file_inode(file_out);
3075
3076 /* Don't copy dirs, pipes, sockets... */
3077 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
3078 return -EISDIR;
3079 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
3080 return -EINVAL;
3081
3082 if (!(file_in->f_mode & FMODE_READ) ||
3083 !(file_out->f_mode & FMODE_WRITE) ||
3084 (file_out->f_flags & O_APPEND))
3085 return -EBADF;
3086
3087 return 0;
3088}
3089
3090/*
3091 * Performs necessary checks before doing a file copy
3092 *
3093 * Can adjust amount of bytes to copy via @req_count argument.
3094 * Returns appropriate error code that caller should return or
3095 * zero in case the copy should be allowed.
3096 */
3097int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
3098 struct file *file_out, loff_t pos_out,
3099 size_t *req_count, unsigned int flags)
3100{
3101 struct inode *inode_in = file_inode(file_in);
3102 struct inode *inode_out = file_inode(file_out);
3103 uint64_t count = *req_count;
3104 loff_t size_in;
3105 int ret;
3106
3107 ret = generic_file_rw_checks(file_in, file_out);
3108 if (ret)
3109 return ret;
3110
3111 /* Don't touch certain kinds of inodes */
3112 if (IS_IMMUTABLE(inode_out))
3113 return -EPERM;
3114
3115 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
3116 return -ETXTBSY;
3117
3118 /* Ensure offsets don't wrap. */
3119 if (pos_in + count < pos_in || pos_out + count < pos_out)
3120 return -EOVERFLOW;
3121
3122 /* Shorten the copy to EOF */
3123 size_in = i_size_read(inode_in);
3124 if (pos_in >= size_in)
3125 count = 0;
3126 else
3127 count = min(count, size_in - (uint64_t)pos_in);
3128
3129 ret = generic_write_check_limits(file_out, pos_out, &count);
3130 if (ret)
3131 return ret;
3132
3133 /* Don't allow overlapped copying within the same file. */
3134 if (inode_in == inode_out &&
3135 pos_out + count > pos_in &&
3136 pos_out < pos_in + count)
3137 return -EINVAL;
3138
3139 *req_count = count;
3140 return 0;
3141}
3142
3143int pagecache_write_begin(struct file *file, struct address_space *mapping,
3144 loff_t pos, unsigned len, unsigned flags,
3145 struct page **pagep, void **fsdata)
3146{
3147 const struct address_space_operations *aops = mapping->a_ops;
3148
3149 return aops->write_begin(file, mapping, pos, len, flags,
3150 pagep, fsdata);
3151}
3152EXPORT_SYMBOL(pagecache_write_begin);
3153
3154int pagecache_write_end(struct file *file, struct address_space *mapping,
3155 loff_t pos, unsigned len, unsigned copied,
3156 struct page *page, void *fsdata)
3157{
3158 const struct address_space_operations *aops = mapping->a_ops;
3159
3160 return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
3161}
3162EXPORT_SYMBOL(pagecache_write_end);
3163
3164ssize_t
3165generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3166{
3167 struct file *file = iocb->ki_filp;
3168 struct address_space *mapping = file->f_mapping;
3169 struct inode *inode = mapping->host;
3170 loff_t pos = iocb->ki_pos;
3171 ssize_t written;
3172 size_t write_len;
3173 pgoff_t end;
3174
3175 write_len = iov_iter_count(from);
3176 end = (pos + write_len - 1) >> PAGE_SHIFT;
3177
3178 if (iocb->ki_flags & IOCB_NOWAIT) {
3179 /* If there are pages to writeback, return */
3180 if (filemap_range_has_page(inode->i_mapping, pos,
3181 pos + write_len - 1))
3182 return -EAGAIN;
3183 } else {
3184 written = filemap_write_and_wait_range(mapping, pos,
3185 pos + write_len - 1);
3186 if (written)
3187 goto out;
3188 }
3189
3190 /*
3191 * After a write we want buffered reads to be sure to go to disk to get
3192 * the new data. We invalidate clean cached pages from the region we're
3193 * about to write. We do this *before* the write so that we can return
3194 * without clobbering -EIOCBQUEUED from ->direct_IO().
3195 */
3196 written = invalidate_inode_pages2_range(mapping,
3197 pos >> PAGE_SHIFT, end);
3198 /*
3199 * If a page cannot be invalidated, return 0 to fall back
3200 * to buffered write.
3201 */
3202 if (written) {
3203 if (written == -EBUSY)
3204 return 0;
3205 goto out;
3206 }
3207
3208 written = mapping->a_ops->direct_IO(iocb, from);
3209
3210 /*
3211 * Finally, try again to invalidate clean pages which might have been
3212 * cached by non-direct readahead, or faulted in by get_user_pages()
3213 * if the source of the write was an mmap'ed region of the file
3214 * we're writing. Either one is a pretty crazy thing to do,
3215 * so we don't support it 100%. If this invalidation
3216 * fails, tough, the write still worked...
3217 *
3218 * Most of the time we do not need this since dio_complete() will do
3219 * the invalidation for us. However there are some file systems that
3220 * do not end up with dio_complete() being called, so let's not break
3221 * them by removing it completely
3222 * them by removing it completely.
3223 if (mapping->nrpages)
3224 invalidate_inode_pages2_range(mapping,
3225 pos >> PAGE_SHIFT, end);
3226
3227 if (written > 0) {
3228 pos += written;
3229 write_len -= written;
3230 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3231 i_size_write(inode, pos);
3232 mark_inode_dirty(inode);
3233 }
3234 iocb->ki_pos = pos;
3235 }
3236 iov_iter_revert(from, write_len - iov_iter_count(from));
3237out:
3238 return written;
3239}
3240EXPORT_SYMBOL(generic_file_direct_write);
3241
3242/*
3243 * Find or create a page at the given pagecache position. Return the locked
3244 * page. This function is specifically for buffered writes.
3245 */
3246struct page *grab_cache_page_write_begin(struct address_space *mapping,
3247 pgoff_t index, unsigned flags)
3248{
3249 struct page *page;
3250 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;
3251
3252 if (flags & AOP_FLAG_NOFS)
3253 fgp_flags |= FGP_NOFS;
3254
3255 page = pagecache_get_page(mapping, index, fgp_flags,
3256 mapping_gfp_mask(mapping));
3257 if (page)
3258 wait_for_stable_page(page);
3259
3260 return page;
3261}
3262EXPORT_SYMBOL(grab_cache_page_write_begin);
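
/*
 * Example (illustrative sketch, foo_write_begin is a hypothetical name): a
 * minimal ->write_begin() built on grab_cache_page_write_begin(), in the
 * style of simple filesystems. "..." elides any block mapping or zeroing the
 * filesystem would have to do before the copy.
 *
 *	static int foo_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		struct page *page;
 *
 *		page = grab_cache_page_write_begin(mapping,
 *						   pos >> PAGE_SHIFT, flags);
 *		if (!page)
 *			return -ENOMEM;
 *		...
 *		*pagep = page;
 *		return 0;
 *	}
 */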
3263
3264ssize_t generic_perform_write(struct file *file,
3265 struct iov_iter *i, loff_t pos)
3266{
3267 struct address_space *mapping = file->f_mapping;
3268 const struct address_space_operations *a_ops = mapping->a_ops;
3269 long status = 0;
3270 ssize_t written = 0;
3271 unsigned int flags = 0;
3272
3273 do {
3274 struct page *page;
3275 unsigned long offset; /* Offset into pagecache page */
3276 unsigned long bytes; /* Bytes to write to page */
3277 size_t copied; /* Bytes copied from user */
3278 void *fsdata;
3279
3280 offset = (pos & (PAGE_SIZE - 1));
3281 bytes = min_t(unsigned long, PAGE_SIZE - offset,
3282 iov_iter_count(i));
3283
3284again:
3285 /*
3286 * Bring in the user page that we will copy from _first_.
3287 * Otherwise there's a nasty deadlock on copying from the
3288 * same page as we're writing to, without it being marked
3289 * up-to-date.
3290 *
3291 * Not only is this an optimisation, but it is also required
3292 * to check that the address is actually valid, when atomic
3293 * usercopies are used, below.
3294 */
3295 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
3296 status = -EFAULT;
3297 break;
3298 }
3299
3300 if (fatal_signal_pending(current)) {
3301 status = -EINTR;
3302 break;
3303 }
3304
3305 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
3306 &page, &fsdata);
3307 if (unlikely(status < 0))
3308 break;
3309
3310 if (mapping_writably_mapped(mapping))
3311 flush_dcache_page(page);
3312
3313 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
3314 flush_dcache_page(page);
3315
3316 status = a_ops->write_end(file, mapping, pos, bytes, copied,
3317 page, fsdata);
3318 if (unlikely(status < 0))
3319 break;
3320 copied = status;
3321
3322 cond_resched();
3323
3324 iov_iter_advance(i, copied);
3325 if (unlikely(copied == 0)) {
3326 /*
3327 * If we were unable to copy any data at all, we must
3328 * fall back to a single segment length write.
3329 *
3330 * If we didn't fall back here, we could livelock
3331 * because not all segments in the iov can be copied at
3332 * once without a pagefault.
3333 */
3334 bytes = min_t(unsigned long, PAGE_SIZE - offset,
3335 iov_iter_single_seg_count(i));
3336 goto again;
3337 }
3338 pos += copied;
3339 written += copied;
3340
3341 balance_dirty_pages_ratelimited(mapping);
3342 } while (iov_iter_count(i));
3343
3344 return written ? written : status;
3345}
3346EXPORT_SYMBOL(generic_perform_write);
3347
3348/**
3349 * __generic_file_write_iter - write data to a file
3350 * @iocb: IO state structure (file, offset, etc.)
3351 * @from: iov_iter with data to write
3352 *
3353 * This function does all the work needed for actually writing data to a
3354 * file. It does all basic checks, removes SUID from the file, updates
3355 * modification times and calls proper subroutines depending on whether we
3356 * do direct IO or a standard buffered write.
3357 *
3358 * It expects i_mutex to be grabbed unless we work on a block device or similar
3359 * object which does not need locking at all.
3360 *
3361 * This function does *not* take care of syncing data in case of O_SYNC write.
3362 * A caller has to handle it. This is mainly due to the fact that we want to
3363 * avoid syncing under i_mutex.
3364 *
3365 * Return:
3366 * * number of bytes written, even for truncated writes
3367 * * negative error code if no data has been written at all
3368 */
3369ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3370{
3371 struct file *file = iocb->ki_filp;
3372 struct address_space * mapping = file->f_mapping;
3373 struct inode *inode = mapping->host;
3374 ssize_t written = 0;
3375 ssize_t err;
3376 ssize_t status;
3377
3378 /* We can write back this queue in page reclaim */
3379 current->backing_dev_info = inode_to_bdi(inode);
3380 err = file_remove_privs(file);
3381 if (err)
3382 goto out;
3383
3384 err = file_update_time(file);
3385 if (err)
3386 goto out;
3387
3388 if (iocb->ki_flags & IOCB_DIRECT) {
3389 loff_t pos, endbyte;
3390
3391 written = generic_file_direct_write(iocb, from);
3392 /*
3393 * If the write stopped short of completing, fall back to
3394 * buffered writes. Some filesystems do this for writes to
3395 * holes, for example. For DAX files, a buffered write will
3396 * not succeed (even if it did, DAX does not handle dirty
3397 * page-cache pages correctly).
3398 */
3399 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
3400 goto out;
3401
3402 status = generic_perform_write(file, from, pos = iocb->ki_pos);
3403 /*
3404 * If generic_perform_write() returned a synchronous error
3405 * then we want to return the number of bytes which were
3406 * direct-written, or the error code if that was zero. Note
3407 * that this differs from normal direct-io semantics, which
3408 * will return -EFOO even if some bytes were written.
3409 */
3410 if (unlikely(status < 0)) {
3411 err = status;
3412 goto out;
3413 }
3414 /*
3415 * We need to ensure that the page cache pages are written to
3416 * disk and invalidated to preserve the expected O_DIRECT
3417 * semantics.
3418 */
3419 endbyte = pos + status - 1;
3420 err = filemap_write_and_wait_range(mapping, pos, endbyte);
3421 if (err == 0) {
3422 iocb->ki_pos = endbyte + 1;
3423 written += status;
3424 invalidate_mapping_pages(mapping,
3425 pos >> PAGE_SHIFT,
3426 endbyte >> PAGE_SHIFT);
3427 } else {
3428 /*
3429 * We don't know how much we wrote, so just return
3430 * the number of bytes which were direct-written
3431 */
3432 }
3433 } else {
3434 written = generic_perform_write(file, from, iocb->ki_pos);
3435 if (likely(written > 0))
3436 iocb->ki_pos += written;
3437 }
3438out:
3439 current->backing_dev_info = NULL;
3440 return written ? written : err;
3441}
3442EXPORT_SYMBOL(__generic_file_write_iter);
3443
3444/**
3445 * generic_file_write_iter - write data to a file
3446 * @iocb: IO state structure
3447 * @from: iov_iter with data to write
3448 *
3449 * This is a wrapper around __generic_file_write_iter() to be used by most
3450 * filesystems. It takes care of syncing the file in case of an O_SYNC write
3451 * and acquires i_mutex as needed.
3452 * Return:
3453 * * negative error code if no data has been written at all or
3454 * vfs_fsync_range() failed for a synchronous write
3455 * * number of bytes written, even for truncated writes
3456 */
3457ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3458{
3459 struct file *file = iocb->ki_filp;
3460 struct inode *inode = file->f_mapping->host;
3461 ssize_t ret;
3462
3463 inode_lock(inode);
3464 ret = generic_write_checks(iocb, from);
3465 if (ret > 0)
3466 ret = __generic_file_write_iter(iocb, from);
3467 inode_unlock(inode);
3468
3469 if (ret > 0)
3470 ret = generic_write_sync(iocb, ret);
3471 return ret;
3472}
3473EXPORT_SYMBOL(generic_file_write_iter);
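
/*
 * Example (illustrative sketch, foo_file_operations is a hypothetical name):
 * how a typical disk filesystem plugs the generic read, write and mmap paths
 * above into its file_operations.
 *
 *	const struct file_operations foo_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.fsync		= generic_file_fsync,
 *	};
 */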
3474
3475/**
3476 * try_to_release_page() - release old fs-specific metadata on a page
3477 *
3478 * @page: the page which the kernel is trying to free
3479 * @gfp_mask: memory allocation flags (and I/O mode)
3480 *
3481 * The address_space is asked to try to release any data held against the page
3482 * (presumably at page->private).
3483 *
3484 * This may also be called if PG_fscache is set on a page, indicating that the
3485 * page is known to the local caching routines.
3486 *
3487 * The @gfp_mask argument specifies whether I/O may be performed to release
3488 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
3489 *
3490 * Return: %1 if the release was successful, otherwise return zero.
3491 */
3492int try_to_release_page(struct page *page, gfp_t gfp_mask)
3493{
3494 struct address_space * const mapping = page->mapping;
3495
3496 BUG_ON(!PageLocked(page));
3497 if (PageWriteback(page))
3498 return 0;
3499
3500 if (mapping && mapping->a_ops->releasepage)
3501 return mapping->a_ops->releasepage(page, gfp_mask);
3502 return try_to_free_buffers(page);
3503}
3504
3505EXPORT_SYMBOL(try_to_release_page);