/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>		/* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
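
/*
 * A minimal usage sketch (not part of this header): a write-completion
 * handler records any I/O error against the mapping, and a later fsync(2)
 * reports it through the errseq_t machinery.  The handler and its context
 * are illustrative assumptions, not kernel code.
 *
 *	static void example_writepage_end_io(struct bio *bio)
 *	{
 *		struct page *page = bio_first_page_all(bio);
 *
 *		if (bio->bi_status)
 *			mapping_set_error(page->mapping,
 *					blk_status_to_errno(bio->bi_status));
 *		end_page_writeback(page);
 *		bio_put(bio);
 *	}
 */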

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
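
/*
 * A sketch of the lookup protocol described above, simplified from the
 * XArray-based lookups in mm/filemap.c (names and error handling trimmed;
 * this is illustrative, not the canonical implementation):
 *
 *	struct page *page;
 *
 *	repeat:
 *		rcu_read_lock();
 *		page = xa_load(&mapping->i_pages, index);	// step 1
 *		if (page && !page_cache_get_speculative(page)) {
 *			rcu_read_unlock();			// step 2 failed:
 *			goto repeat;				// page was freed
 *		}
 *		if (page && page != xa_load(&mapping->i_pages, index)) {
 *			put_page(page);				// step 3 failed:
 *			rcu_read_unlock();			// raced with remove
 *			goto repeat;
 *		}
 *		rcu_read_unlock();
 */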

/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);

	return data;
}
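
/*
 * Usage sketch with a hypothetical per-page bookkeeping structure (the
 * struct and helper names are illustrative, not kernel APIs): attach on
 * setup, detach before the page can be freed.
 *
 *	struct example_page_state *state = kzalloc(sizeof(*state), GFP_NOFS);
 *
 *	if (state)
 *		attach_page_private(page, state);	// takes a page ref
 *	...
 *	state = detach_page_private(page);		// drops the page ref
 *	kfree(state);					// kfree(NULL) is safe
 */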

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
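
/*
 * Usage sketch for a simple metadata read path (example_fill_page() is a
 * hypothetical filler; real callers also handle read errors and PageError):
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	if (!PageUptodate(page))
 *		example_fill_page(page);	// read data, SetPageUptodate()
 *	unlock_page(page);
 *	put_page(page);				// drop the reference when done
 */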

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get the index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
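
/*
 * Example: for a fault at @address within @vma, linear_page_index() yields
 * the pagecache index of the backing page, which the fault handler then
 * looks up in the mapping of vma->vm_file.
 */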

/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->page != key->page)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_async - Lock the page, unless this would block.  If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
				  struct wait_page_queue *wait)
{
	if (!trylock_page(page))
		return __lock_page_async(page, wait);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
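
/*
 * Sketch of the fault-path contract (cf. do_swap_page()): when
 * lock_page_or_retry() fails under FAULT_FLAG_ALLOW_RETRY,
 * __lock_page_or_retry() may already have dropped the mmap_lock, so the
 * caller must back out and report a retry:
 *
 *	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 *		put_page(page);
 *		return VM_FAULT_RETRY;
 *	}
 */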

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
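
/*
 * Usage sketch: prefault a user buffer before copying from it under a
 * locked pagecache page, so the copy itself cannot take a page fault while
 * the lock is held (the write path in mm/filemap.c applies the same idea
 * through its iov_iter based variant):
 *
 *	if (unlikely(fault_in_pages_readable(ubuf, bytes)))
 *		return -EFAULT;
 */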

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
		struct file *, pgoff_t index, unsigned long req_count);
void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
		struct file *, struct page *, pgoff_t index,
		unsigned long req_count);
void page_cache_readahead_unbounded(struct address_space *, struct file *,
		pgoff_t index, unsigned long nr_to_read,
		unsigned long lookahead_count);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = thp_nr_pages(page);

	return page;
}
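
/*
 * Sketch of a filesystem ->readahead implementation built on this iterator
 * (example_start_read() is a hypothetical helper that submits the I/O,
 * drops the page reference after submission, and unlocks the page from its
 * completion handler):
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac)))
 *			example_start_read(rac->mapping, page);
 *	}
 */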

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index.  This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount on each page once it has been submitted for
 * I/O and unlock each page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
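
/*
 * Usage sketch with an on-stack batch (the array size is an arbitrary
 * illustrative choice; example_start_read() is hypothetical, as above):
 *
 *	struct page *pages[16];
 *	unsigned int i, nr;
 *
 *	while ((nr = readahead_page_batch(rac, pages))) {
 *		for (i = 0; i < nr; i++)
 *			example_start_read(rac->mapping, pages[i]);
 *	}
 */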

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
	return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
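
/*
 * Usage sketch for a ->page_mkwrite handler (cf. iomap_page_mkwrite()):
 * lock the page, then let this helper decide whether the fault raced with
 * truncation and how many bytes remain writable up to EOF:
 *
 *	lock_page(page);
 *	len = page_mkwrite_check_truncate(page, inode);
 *	if (len < 0) {
 *		unlock_page(page);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	// dirty the first len bytes; keep the page locked as appropriate
 */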

#endif /* _LINUX_PAGEMAP_H */