1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/pagemap.h>
29#include <linux/file.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/swap.h>
33
34static struct vfsmount *shm_mnt;
35
36#ifdef CONFIG_SHMEM
37/*
38 * This virtual memory filesystem is heavily based on the ramfs. It
39 * extends ramfs by the ability to use swap and honor resource limits
40 * which makes it a completely usable filesystem.
41 */
42
43#include <linux/xattr.h>
44#include <linux/exportfs.h>
45#include <linux/posix_acl.h>
46#include <linux/generic_acl.h>
47#include <linux/mman.h>
48#include <linux/string.h>
49#include <linux/slab.h>
50#include <linux/backing-dev.h>
51#include <linux/shmem_fs.h>
52#include <linux/writeback.h>
53#include <linux/blkdev.h>
54#include <linux/pagevec.h>
55#include <linux/percpu_counter.h>
56#include <linux/splice.h>
57#include <linux/security.h>
58#include <linux/swapops.h>
59#include <linux/mempolicy.h>
60#include <linux/namei.h>
61#include <linux/ctype.h>
62#include <linux/migrate.h>
63#include <linux/highmem.h>
64#include <linux/seq_file.h>
65#include <linux/magic.h>
66
67#include <asm/uaccess.h>
68#include <asm/pgtable.h>
69
70#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
71#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
72
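/*
 * BLOCKS_PER_PAGE converts a page count to the 512-byte units kept in
 * inode->i_blocks (8 per page with a 4kB PAGE_CACHE_SIZE); VM_ACCT converts
 * a byte length into whole pages for the overcommit accounting helpers
 * (security_vm_enough_memory_kern / vm_unacct_memory).
 */
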
73/* Pretend that each entry is of this size in directory's i_size */
74#define BOGO_DIRENT_SIZE 20
75
76/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
77#define SHORT_SYMLINK_LEN 128
78
79struct shmem_xattr {
80 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
81 char *name; /* xattr name */
82 size_t size;
83 char value[0];
84};
85
86/* Flag allocation requirements to shmem_getpage */
87enum sgp_type {
88 SGP_READ, /* don't exceed i_size, don't allocate page */
89 SGP_CACHE, /* don't exceed i_size, may allocate page */
90 SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
91 SGP_WRITE, /* may exceed i_size, may allocate page */
92};
93
94#ifdef CONFIG_TMPFS
95static unsigned long shmem_default_max_blocks(void)
96{
97 return totalram_pages / 2;
98}
99
100static unsigned long shmem_default_max_inodes(void)
101{
102 return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
103}
104#endif
105
106static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
107 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
108
109static inline int shmem_getpage(struct inode *inode, pgoff_t index,
110 struct page **pagep, enum sgp_type sgp, int *fault_type)
111{
112 return shmem_getpage_gfp(inode, index, pagep, sgp,
113 mapping_gfp_mask(inode->i_mapping), fault_type);
114}
115
116static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
117{
118 return sb->s_fs_info;
119}
120
121/*
122 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
123 * for shared memory and for shared anonymous (/dev/zero) mappings
124 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
125 * consistent with the pre-accounting of private mappings ...
126 */
127static inline int shmem_acct_size(unsigned long flags, loff_t size)
128{
129 return (flags & VM_NORESERVE) ?
130 0 : security_vm_enough_memory_kern(VM_ACCT(size));
131}
132
133static inline void shmem_unacct_size(unsigned long flags, loff_t size)
134{
135 if (!(flags & VM_NORESERVE))
136 vm_unacct_memory(VM_ACCT(size));
137}
138
139/*
140 * ... whereas tmpfs objects are accounted incrementally as
141 * pages are allocated, in order to allow huge sparse files.
142 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
143 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
144 */
145static inline int shmem_acct_block(unsigned long flags)
146{
147 return (flags & VM_NORESERVE) ?
148 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
149}
150
151static inline void shmem_unacct_blocks(unsigned long flags, long pages)
152{
153 if (flags & VM_NORESERVE)
154 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
155}
156
157static const struct super_operations shmem_ops;
158static const struct address_space_operations shmem_aops;
159static const struct file_operations shmem_file_operations;
160static const struct inode_operations shmem_inode_operations;
161static const struct inode_operations shmem_dir_inode_operations;
162static const struct inode_operations shmem_special_inode_operations;
163static const struct vm_operations_struct shmem_vm_ops;
164
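/*
 * tmpfs needs no readahead, and no dirty or writeback accounting: its pages
 * sit in the page cache and are only written out to swap under memory
 * pressure, via shmem_writepage().
 */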
165static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
166 .ra_pages = 0, /* No readahead */
167 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
168};
169
170static LIST_HEAD(shmem_swaplist);
171static DEFINE_MUTEX(shmem_swaplist_mutex);
172
173static int shmem_reserve_inode(struct super_block *sb)
174{
175 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
176 if (sbinfo->max_inodes) {
177 spin_lock(&sbinfo->stat_lock);
178 if (!sbinfo->free_inodes) {
179 spin_unlock(&sbinfo->stat_lock);
180 return -ENOSPC;
181 }
182 sbinfo->free_inodes--;
183 spin_unlock(&sbinfo->stat_lock);
184 }
185 return 0;
186}
187
188static void shmem_free_inode(struct super_block *sb)
189{
190 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
191 if (sbinfo->max_inodes) {
192 spin_lock(&sbinfo->stat_lock);
193 sbinfo->free_inodes++;
194 spin_unlock(&sbinfo->stat_lock);
195 }
196}
197
198/**
199 * shmem_recalc_inode - recalculate the block usage of an inode
200 * @inode: inode to recalc
201 *
202 * We have to calculate the free blocks since the mm can drop
203 * undirtied hole pages behind our back.
204 *
205 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
206 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
207 *
208 * It has to be called with the spinlock held.
209 */
210static void shmem_recalc_inode(struct inode *inode)
211{
212 struct shmem_inode_info *info = SHMEM_I(inode);
213 long freed;
214
215 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
216 if (freed > 0) {
217 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
218 if (sbinfo->max_blocks)
219 percpu_counter_add(&sbinfo->used_blocks, -freed);
220 info->alloced -= freed;
221 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
222 shmem_unacct_blocks(info->flags, freed);
223 }
224}
225
226/*
227 * Replace item expected in radix tree by a new item, while holding tree lock.
228 */
229static int shmem_radix_tree_replace(struct address_space *mapping,
230 pgoff_t index, void *expected, void *replacement)
231{
232 void **pslot;
233 void *item = NULL;
234
235 VM_BUG_ON(!expected);
236 pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
237 if (pslot)
238 item = radix_tree_deref_slot_protected(pslot,
239 &mapping->tree_lock);
240 if (item != expected)
241 return -ENOENT;
242 if (replacement)
243 radix_tree_replace_slot(pslot, replacement);
244 else
245 radix_tree_delete(&mapping->page_tree, index);
246 return 0;
247}
248
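/*
 * A swapped-out page is represented in the mapping's radix tree by an
 * "exceptional" entry: the swp_entry_t encoded by swp_to_radix_entry()
 * stands in for the struct page pointer, so the lookups below must test
 * radix_tree_exceptional_entry() before treating the result as a page.
 */
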
249/*
250 * Like add_to_page_cache_locked, but error if expected item has gone.
251 */
252static int shmem_add_to_page_cache(struct page *page,
253 struct address_space *mapping,
254 pgoff_t index, gfp_t gfp, void *expected)
255{
256 int error = 0;
257
258 VM_BUG_ON(!PageLocked(page));
259 VM_BUG_ON(!PageSwapBacked(page));
260
261 if (!expected)
262 error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
263 if (!error) {
264 page_cache_get(page);
265 page->mapping = mapping;
266 page->index = index;
267
268 spin_lock_irq(&mapping->tree_lock);
269 if (!expected)
270 error = radix_tree_insert(&mapping->page_tree,
271 index, page);
272 else
273 error = shmem_radix_tree_replace(mapping, index,
274 expected, page);
275 if (!error) {
276 mapping->nrpages++;
277 __inc_zone_page_state(page, NR_FILE_PAGES);
278 __inc_zone_page_state(page, NR_SHMEM);
279 spin_unlock_irq(&mapping->tree_lock);
280 } else {
281 page->mapping = NULL;
282 spin_unlock_irq(&mapping->tree_lock);
283 page_cache_release(page);
284 }
285 if (!expected)
286 radix_tree_preload_end();
287 }
288 if (error)
289 mem_cgroup_uncharge_cache_page(page);
290 return error;
291}
292
293/*
294 * Like delete_from_page_cache, but substitutes swap for page.
295 */
296static void shmem_delete_from_page_cache(struct page *page, void *radswap)
297{
298 struct address_space *mapping = page->mapping;
299 int error;
300
301 spin_lock_irq(&mapping->tree_lock);
302 error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
303 page->mapping = NULL;
304 mapping->nrpages--;
305 __dec_zone_page_state(page, NR_FILE_PAGES);
306 __dec_zone_page_state(page, NR_SHMEM);
307 spin_unlock_irq(&mapping->tree_lock);
308 page_cache_release(page);
309 BUG_ON(error);
310}
311
312/*
313 * Like find_get_pages, but collecting swap entries as well as pages.
314 */
315static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
316 pgoff_t start, unsigned int nr_pages,
317 struct page **pages, pgoff_t *indices)
318{
319 unsigned int i;
320 unsigned int ret;
321 unsigned int nr_found;
322
323 rcu_read_lock();
324restart:
325 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
326 (void ***)pages, indices, start, nr_pages);
327 ret = 0;
328 for (i = 0; i < nr_found; i++) {
329 struct page *page;
330repeat:
331 page = radix_tree_deref_slot((void **)pages[i]);
332 if (unlikely(!page))
333 continue;
334 if (radix_tree_exception(page)) {
335 if (radix_tree_deref_retry(page))
336 goto restart;
337 /*
338 * Otherwise, we must be storing a swap entry
339 * here as an exceptional entry: so return it
340 * without attempting to raise page count.
341 */
342 goto export;
343 }
344 if (!page_cache_get_speculative(page))
345 goto repeat;
346
347 /* Has the page moved? */
348 if (unlikely(page != *((void **)pages[i]))) {
349 page_cache_release(page);
350 goto repeat;
351 }
352export:
353 indices[ret] = indices[i];
354 pages[ret] = page;
355 ret++;
356 }
357 if (unlikely(!ret && nr_found))
358 goto restart;
359 rcu_read_unlock();
360 return ret;
361}
362
363/*
364 * Remove swap entry from radix tree, free the swap and its page cache.
365 */
366static int shmem_free_swap(struct address_space *mapping,
367 pgoff_t index, void *radswap)
368{
369 int error;
370
371 spin_lock_irq(&mapping->tree_lock);
372 error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
373 spin_unlock_irq(&mapping->tree_lock);
374 if (!error)
375 free_swap_and_cache(radix_to_swp_entry(radswap));
376 return error;
377}
378
379/*
380 * Pagevec may contain swap entries, so shuffle up pages before releasing.
381 */
382static void shmem_pagevec_release(struct pagevec *pvec)
383{
384 int i, j;
385
386 for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
387 struct page *page = pvec->pages[i];
388 if (!radix_tree_exceptional_entry(page))
389 pvec->pages[j++] = page;
390 }
391 pvec->nr = j;
392 pagevec_release(pvec);
393}
394
395/*
396 * Remove range of pages and swap entries from radix tree, and free them.
397 */
398void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
399{
400 struct address_space *mapping = inode->i_mapping;
401 struct shmem_inode_info *info = SHMEM_I(inode);
402 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
403 unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
404 pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
405 struct pagevec pvec;
406 pgoff_t indices[PAGEVEC_SIZE];
407 long nr_swaps_freed = 0;
408 pgoff_t index;
409 int i;
410
411 BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
412
413 pagevec_init(&pvec, 0);
414 index = start;
415 while (index <= end) {
416 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
417 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
418 pvec.pages, indices);
419 if (!pvec.nr)
420 break;
421 mem_cgroup_uncharge_start();
422 for (i = 0; i < pagevec_count(&pvec); i++) {
423 struct page *page = pvec.pages[i];
424
425 index = indices[i];
426 if (index > end)
427 break;
428
429 if (radix_tree_exceptional_entry(page)) {
430 nr_swaps_freed += !shmem_free_swap(mapping,
431 index, page);
432 continue;
433 }
434
435 if (!trylock_page(page))
436 continue;
437 if (page->mapping == mapping) {
438 VM_BUG_ON(PageWriteback(page));
439 truncate_inode_page(mapping, page);
440 }
441 unlock_page(page);
442 }
443 shmem_pagevec_release(&pvec);
444 mem_cgroup_uncharge_end();
445 cond_resched();
446 index++;
447 }
448
449 if (partial) {
450 struct page *page = NULL;
451 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
452 if (page) {
453 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
454 set_page_dirty(page);
455 unlock_page(page);
456 page_cache_release(page);
457 }
458 }
459
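	/*
	 * The pass above used trylock_page() and may have skipped busy pages;
	 * now repeat with lock_page() until the whole range is really empty.
	 */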
460 index = start;
461 for ( ; ; ) {
462 cond_resched();
463 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
464 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
465 pvec.pages, indices);
466 if (!pvec.nr) {
467 if (index == start)
468 break;
469 index = start;
470 continue;
471 }
472 if (index == start && indices[0] > end) {
473 shmem_pagevec_release(&pvec);
474 break;
475 }
476 mem_cgroup_uncharge_start();
477 for (i = 0; i < pagevec_count(&pvec); i++) {
478 struct page *page = pvec.pages[i];
479
480 index = indices[i];
481 if (index > end)
482 break;
483
484 if (radix_tree_exceptional_entry(page)) {
485 nr_swaps_freed += !shmem_free_swap(mapping,
486 index, page);
487 continue;
488 }
489
490 lock_page(page);
491 if (page->mapping == mapping) {
492 VM_BUG_ON(PageWriteback(page));
493 truncate_inode_page(mapping, page);
494 }
495 unlock_page(page);
496 }
497 shmem_pagevec_release(&pvec);
498 mem_cgroup_uncharge_end();
499 index++;
500 }
501
502 spin_lock(&info->lock);
503 info->swapped -= nr_swaps_freed;
504 shmem_recalc_inode(inode);
505 spin_unlock(&info->lock);
506
507 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
508}
509EXPORT_SYMBOL_GPL(shmem_truncate_range);
510
511static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
512{
513 struct inode *inode = dentry->d_inode;
514 int error;
515
516 error = inode_change_ok(inode, attr);
517 if (error)
518 return error;
519
520 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
521 loff_t oldsize = inode->i_size;
522 loff_t newsize = attr->ia_size;
523
524 if (newsize != oldsize) {
525 i_size_write(inode, newsize);
526 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
527 }
528 if (newsize < oldsize) {
529 loff_t holebegin = round_up(newsize, PAGE_SIZE);
530 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
531 shmem_truncate_range(inode, newsize, (loff_t)-1);
532 /* unmap again to remove racily COWed private pages */
533 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
534 }
535 }
536
537 setattr_copy(inode, attr);
538#ifdef CONFIG_TMPFS_POSIX_ACL
539 if (attr->ia_valid & ATTR_MODE)
540 error = generic_acl_chmod(inode);
541#endif
542 return error;
543}
544
545static void shmem_evict_inode(struct inode *inode)
546{
547 struct shmem_inode_info *info = SHMEM_I(inode);
548 struct shmem_xattr *xattr, *nxattr;
549
550 if (inode->i_mapping->a_ops == &shmem_aops) {
551 shmem_unacct_size(info->flags, inode->i_size);
552 inode->i_size = 0;
553 shmem_truncate_range(inode, 0, (loff_t)-1);
554 if (!list_empty(&info->swaplist)) {
555 mutex_lock(&shmem_swaplist_mutex);
556 list_del_init(&info->swaplist);
557 mutex_unlock(&shmem_swaplist_mutex);
558 }
559 } else
560 kfree(info->symlink);
561
562 list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
563 kfree(xattr->name);
564 kfree(xattr);
565 }
566 BUG_ON(inode->i_blocks);
567 shmem_free_inode(inode->i_sb);
568 end_writeback(inode);
569}
570
571/*
572 * If swap found in inode, free it and move page from swapcache to filecache.
573 */
574static int shmem_unuse_inode(struct shmem_inode_info *info,
575 swp_entry_t swap, struct page *page)
576{
577 struct address_space *mapping = info->vfs_inode.i_mapping;
578 void *radswap;
579 pgoff_t index;
580 int error;
581
582 radswap = swp_to_radix_entry(swap);
583 index = radix_tree_locate_item(&mapping->page_tree, radswap);
584 if (index == -1)
585 return 0;
586
587 /*
588 * Move _head_ to start search for next from here.
589 * But be careful: shmem_evict_inode checks list_empty without taking
590 * mutex, and there's an instant in list_move_tail when info->swaplist
591 * would appear empty, if it were the only one on shmem_swaplist.
592 */
593 if (shmem_swaplist.next != &info->swaplist)
594 list_move_tail(&shmem_swaplist, &info->swaplist);
595
596 /*
597 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
598 * but also to hold up shmem_evict_inode(): so inode cannot be freed
599 * beneath us (pagelock doesn't help until the page is in pagecache).
600 */
601 error = shmem_add_to_page_cache(page, mapping, index,
602 GFP_NOWAIT, radswap);
603 /* which does mem_cgroup_uncharge_cache_page on error */
604
605 if (error != -ENOMEM) {
606 /*
607 * Truncation and eviction use free_swap_and_cache(), which
608 * only does trylock page: if we raced, best clean up here.
609 */
610 delete_from_swap_cache(page);
611 set_page_dirty(page);
612 if (!error) {
613 spin_lock(&info->lock);
614 info->swapped--;
615 spin_unlock(&info->lock);
616 swap_free(swap);
617 }
618 error = 1; /* not an error, but entry was found */
619 }
620 return error;
621}
622
623/*
624 * Search through swapped inodes to find and replace swap by page.
625 */
626int shmem_unuse(swp_entry_t swap, struct page *page)
627{
628 struct list_head *this, *next;
629 struct shmem_inode_info *info;
630 int found = 0;
631 int error;
632
633 /*
634 * Charge page using GFP_KERNEL while we can wait, before taking
635 * the shmem_swaplist_mutex which might hold up shmem_writepage().
636 * Charged back to the user (not to caller) when swap account is used.
637 */
638 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
639 if (error)
640 goto out;
641 /* No radix_tree_preload: swap entry keeps a place for page in tree */
642
643 mutex_lock(&shmem_swaplist_mutex);
644 list_for_each_safe(this, next, &shmem_swaplist) {
645 info = list_entry(this, struct shmem_inode_info, swaplist);
646 if (info->swapped)
647 found = shmem_unuse_inode(info, swap, page);
648 else
649 list_del_init(&info->swaplist);
650 cond_resched();
651 if (found)
652 break;
653 }
654 mutex_unlock(&shmem_swaplist_mutex);
655
656 if (!found)
657 mem_cgroup_uncharge_cache_page(page);
658 if (found < 0)
659 error = found;
660out:
661 unlock_page(page);
662 page_cache_release(page);
663 return error;
664}
665
666/*
667 * Move the page from the page cache to the swap cache.
668 */
669static int shmem_writepage(struct page *page, struct writeback_control *wbc)
670{
671 struct shmem_inode_info *info;
672 struct address_space *mapping;
673 struct inode *inode;
674 swp_entry_t swap;
675 pgoff_t index;
676
677 BUG_ON(!PageLocked(page));
678 mapping = page->mapping;
679 index = page->index;
680 inode = mapping->host;
681 info = SHMEM_I(inode);
682 if (info->flags & VM_LOCKED)
683 goto redirty;
684 if (!total_swap_pages)
685 goto redirty;
686
687 /*
688 * shmem_backing_dev_info's capabilities prevent regular writeback or
689 * sync from ever calling shmem_writepage; but a stacking filesystem
690 * might use ->writepage of its underlying filesystem, in which case
691 * tmpfs should write out to swap only in response to memory pressure,
692 * and not for the writeback threads or sync.
693 */
694 if (!wbc->for_reclaim) {
695 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
696 goto redirty;
697 }
698 swap = get_swap_page();
699 if (!swap.val)
700 goto redirty;
701
702 /*
703 * Add inode to shmem_unuse()'s list of swapped-out inodes,
704 * if it's not already there. Do it now before the page is
705 * moved to swap cache, when its pagelock no longer protects
706 * the inode from eviction. But don't unlock the mutex until
707 * we've incremented swapped, because shmem_unuse_inode() will
708 * prune a !swapped inode from the swaplist under this mutex.
709 */
710 mutex_lock(&shmem_swaplist_mutex);
711 if (list_empty(&info->swaplist))
712 list_add_tail(&info->swaplist, &shmem_swaplist);
713
714 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
715 swap_shmem_alloc(swap);
716 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
717
718 spin_lock(&info->lock);
719 info->swapped++;
720 shmem_recalc_inode(inode);
721 spin_unlock(&info->lock);
722
723 mutex_unlock(&shmem_swaplist_mutex);
724 BUG_ON(page_mapped(page));
725 swap_writepage(page, wbc);
726 return 0;
727 }
728
729 mutex_unlock(&shmem_swaplist_mutex);
730 swapcache_free(swap, NULL);
731redirty:
732 set_page_dirty(page);
733 if (wbc->for_reclaim)
734 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
735 unlock_page(page);
736 return 0;
737}
738
739#ifdef CONFIG_NUMA
740#ifdef CONFIG_TMPFS
741static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
742{
743 char buffer[64];
744
745 if (!mpol || mpol->mode == MPOL_DEFAULT)
746 return; /* show nothing */
747
748 mpol_to_str(buffer, sizeof(buffer), mpol, 1);
749
750 seq_printf(seq, ",mpol=%s", buffer);
751}
752
753static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
754{
755 struct mempolicy *mpol = NULL;
756 if (sbinfo->mpol) {
757 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
758 mpol = sbinfo->mpol;
759 mpol_get(mpol);
760 spin_unlock(&sbinfo->stat_lock);
761 }
762 return mpol;
763}
764#endif /* CONFIG_TMPFS */
765
766static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
767 struct shmem_inode_info *info, pgoff_t index)
768{
769 struct mempolicy mpol, *spol;
770 struct vm_area_struct pvma;
771
772 spol = mpol_cond_copy(&mpol,
773 mpol_shared_policy_lookup(&info->policy, index));
774
775 /* Create a pseudo vma that just contains the policy */
776 pvma.vm_start = 0;
777 pvma.vm_pgoff = index;
778 pvma.vm_ops = NULL;
779 pvma.vm_policy = spol;
780 return swapin_readahead(swap, gfp, &pvma, 0);
781}
782
783static struct page *shmem_alloc_page(gfp_t gfp,
784 struct shmem_inode_info *info, pgoff_t index)
785{
786 struct vm_area_struct pvma;
787
788 /* Create a pseudo vma that just contains the policy */
789 pvma.vm_start = 0;
790 pvma.vm_pgoff = index;
791 pvma.vm_ops = NULL;
792 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
793
794 /*
795 * alloc_page_vma() will drop the shared policy reference
796 */
797 return alloc_page_vma(gfp, &pvma, 0);
798}
799#else /* !CONFIG_NUMA */
800#ifdef CONFIG_TMPFS
801static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
802{
803}
804#endif /* CONFIG_TMPFS */
805
806static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
807 struct shmem_inode_info *info, pgoff_t index)
808{
809 return swapin_readahead(swap, gfp, NULL, 0);
810}
811
812static inline struct page *shmem_alloc_page(gfp_t gfp,
813 struct shmem_inode_info *info, pgoff_t index)
814{
815 return alloc_page(gfp);
816}
817#endif /* CONFIG_NUMA */
818
819#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
820static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
821{
822 return NULL;
823}
824#endif
825
826/*
827 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
828 *
829 * If we allocate a new one we do not mark it dirty: that's up to the
830 * vm. If we swap it in we mark it dirty, since we also free the swap
831 * entry: a page cannot live in both the swap and page cache.
832 */
833static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
834 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
835{
836 struct address_space *mapping = inode->i_mapping;
837 struct shmem_inode_info *info;
838 struct shmem_sb_info *sbinfo;
839 struct page *page;
840 swp_entry_t swap;
841 int error;
842 int once = 0;
843
844 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
845 return -EFBIG;
846repeat:
847 swap.val = 0;
848 page = find_lock_page(mapping, index);
849 if (radix_tree_exceptional_entry(page)) {
850 swap = radix_to_swp_entry(page);
851 page = NULL;
852 }
853
854 if (sgp != SGP_WRITE &&
855 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
856 error = -EINVAL;
857 goto failed;
858 }
859
860 if (page || (sgp == SGP_READ && !swap.val)) {
861 /*
862 * Once we can get the page lock, it must be uptodate:
863 * if there were an error in reading back from swap,
864 * the page would not be inserted into the filecache.
865 */
866 BUG_ON(page && !PageUptodate(page));
867 *pagep = page;
868 return 0;
869 }
870
871 /*
872 * Fast cache lookup did not find it:
873 * bring it back from swap or allocate.
874 */
875 info = SHMEM_I(inode);
876 sbinfo = SHMEM_SB(inode->i_sb);
877
878 if (swap.val) {
879 /* Look it up and read it in.. */
880 page = lookup_swap_cache(swap);
881 if (!page) {
882 /* here we actually do the io */
883 if (fault_type)
884 *fault_type |= VM_FAULT_MAJOR;
885 page = shmem_swapin(swap, gfp, info, index);
886 if (!page) {
887 error = -ENOMEM;
888 goto failed;
889 }
890 }
891
892 /* We have to do this with page locked to prevent races */
893 lock_page(page);
894 if (!PageUptodate(page)) {
895 error = -EIO;
896 goto failed;
897 }
898 wait_on_page_writeback(page);
899
900 /* Someone may have already done it for us */
901 if (page->mapping) {
902 if (page->mapping == mapping &&
903 page->index == index)
904 goto done;
905 error = -EEXIST;
906 goto failed;
907 }
908
909 error = mem_cgroup_cache_charge(page, current->mm,
910 gfp & GFP_RECLAIM_MASK);
911 if (!error)
912 error = shmem_add_to_page_cache(page, mapping, index,
913 gfp, swp_to_radix_entry(swap));
914 if (error)
915 goto failed;
916
917 spin_lock(&info->lock);
918 info->swapped--;
919 shmem_recalc_inode(inode);
920 spin_unlock(&info->lock);
921
922 delete_from_swap_cache(page);
923 set_page_dirty(page);
924 swap_free(swap);
925
926 } else {
927 if (shmem_acct_block(info->flags)) {
928 error = -ENOSPC;
929 goto failed;
930 }
931 if (sbinfo->max_blocks) {
932 if (percpu_counter_compare(&sbinfo->used_blocks,
933 sbinfo->max_blocks) >= 0) {
934 error = -ENOSPC;
935 goto unacct;
936 }
937 percpu_counter_inc(&sbinfo->used_blocks);
938 }
939
940 page = shmem_alloc_page(gfp, info, index);
941 if (!page) {
942 error = -ENOMEM;
943 goto decused;
944 }
945
946 SetPageSwapBacked(page);
947 __set_page_locked(page);
948 error = mem_cgroup_cache_charge(page, current->mm,
949 gfp & GFP_RECLAIM_MASK);
950 if (!error)
951 error = shmem_add_to_page_cache(page, mapping, index,
952 gfp, NULL);
953 if (error)
954 goto decused;
955 lru_cache_add_anon(page);
956
957 spin_lock(&info->lock);
958 info->alloced++;
959 inode->i_blocks += BLOCKS_PER_PAGE;
960 shmem_recalc_inode(inode);
961 spin_unlock(&info->lock);
962
963 clear_highpage(page);
964 flush_dcache_page(page);
965 SetPageUptodate(page);
966 if (sgp == SGP_DIRTY)
967 set_page_dirty(page);
968 }
969done:
970 /* Perhaps the file has been truncated since we checked */
971 if (sgp != SGP_WRITE &&
972 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
973 error = -EINVAL;
974 goto trunc;
975 }
976 *pagep = page;
977 return 0;
978
979 /*
980 * Error recovery.
981 */
982trunc:
983 ClearPageDirty(page);
984 delete_from_page_cache(page);
985 spin_lock(&info->lock);
986 info->alloced--;
987 inode->i_blocks -= BLOCKS_PER_PAGE;
988 spin_unlock(&info->lock);
989decused:
990 if (sbinfo->max_blocks)
991 percpu_counter_add(&sbinfo->used_blocks, -1);
992unacct:
993 shmem_unacct_blocks(info->flags, 1);
994failed:
995 if (swap.val && error != -EINVAL) {
996 struct page *test = find_get_page(mapping, index);
997 if (test && !radix_tree_exceptional_entry(test))
998 page_cache_release(test);
999 /* Have another try if the entry has changed */
1000 if (test != swp_to_radix_entry(swap))
1001 error = -EEXIST;
1002 }
1003 if (page) {
1004 unlock_page(page);
1005 page_cache_release(page);
1006 }
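	/*
	 * On the first -ENOSPC, recalculate the inode's block usage in case
	 * the mm dropped clean pages behind our back (see shmem_recalc_inode)
	 * and their accounting can be reclaimed, then retry once.
	 */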
1007 if (error == -ENOSPC && !once++) {
1008 info = SHMEM_I(inode);
1009 spin_lock(&info->lock);
1010 shmem_recalc_inode(inode);
1011 spin_unlock(&info->lock);
1012 goto repeat;
1013 }
1014 if (error == -EEXIST)
1015 goto repeat;
1016 return error;
1017}
1018
1019static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1020{
1021 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1022 int error;
1023 int ret = VM_FAULT_LOCKED;
1024
1025 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1026 if (error)
1027 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1028
1029 if (ret & VM_FAULT_MAJOR) {
1030 count_vm_event(PGMAJFAULT);
1031 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1032 }
1033 return ret;
1034}
1035
1036#ifdef CONFIG_NUMA
1037static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1038{
1039 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1040 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1041}
1042
1043static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1044 unsigned long addr)
1045{
1046 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1047 pgoff_t index;
1048
1049 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1050 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1051}
1052#endif
1053
1054int shmem_lock(struct file *file, int lock, struct user_struct *user)
1055{
1056 struct inode *inode = file->f_path.dentry->d_inode;
1057 struct shmem_inode_info *info = SHMEM_I(inode);
1058 int retval = -ENOMEM;
1059
1060 spin_lock(&info->lock);
1061 if (lock && !(info->flags & VM_LOCKED)) {
1062 if (!user_shm_lock(inode->i_size, user))
1063 goto out_nomem;
1064 info->flags |= VM_LOCKED;
1065 mapping_set_unevictable(file->f_mapping);
1066 }
1067 if (!lock && (info->flags & VM_LOCKED) && user) {
1068 user_shm_unlock(inode->i_size, user);
1069 info->flags &= ~VM_LOCKED;
1070 mapping_clear_unevictable(file->f_mapping);
1071 scan_mapping_unevictable_pages(file->f_mapping);
1072 }
1073 retval = 0;
1074
1075out_nomem:
1076 spin_unlock(&info->lock);
1077 return retval;
1078}
1079
1080static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1081{
1082 file_accessed(file);
1083 vma->vm_ops = &shmem_vm_ops;
1084 vma->vm_flags |= VM_CAN_NONLINEAR;
1085 return 0;
1086}
1087
1088static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1089 int mode, dev_t dev, unsigned long flags)
1090{
1091 struct inode *inode;
1092 struct shmem_inode_info *info;
1093 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1094
1095 if (shmem_reserve_inode(sb))
1096 return NULL;
1097
1098 inode = new_inode(sb);
1099 if (inode) {
1100 inode->i_ino = get_next_ino();
1101 inode_init_owner(inode, dir, mode);
1102 inode->i_blocks = 0;
1103 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1104 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1105 inode->i_generation = get_seconds();
1106 info = SHMEM_I(inode);
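		/*
		 * Zero only the shmem-private fields, which precede the
		 * embedded vfs_inode that new_inode() has already set up.
		 */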
1107 memset(info, 0, (char *)inode - (char *)info);
1108 spin_lock_init(&info->lock);
1109 info->flags = flags & VM_NORESERVE;
1110 INIT_LIST_HEAD(&info->swaplist);
1111 INIT_LIST_HEAD(&info->xattr_list);
1112 cache_no_acl(inode);
1113
1114 switch (mode & S_IFMT) {
1115 default:
1116 inode->i_op = &shmem_special_inode_operations;
1117 init_special_inode(inode, mode, dev);
1118 break;
1119 case S_IFREG:
1120 inode->i_mapping->a_ops = &shmem_aops;
1121 inode->i_op = &shmem_inode_operations;
1122 inode->i_fop = &shmem_file_operations;
1123 mpol_shared_policy_init(&info->policy,
1124 shmem_get_sbmpol(sbinfo));
1125 break;
1126 case S_IFDIR:
1127 inc_nlink(inode);
1128 /* Some things misbehave if size == 0 on a directory */
1129 inode->i_size = 2 * BOGO_DIRENT_SIZE;
1130 inode->i_op = &shmem_dir_inode_operations;
1131 inode->i_fop = &simple_dir_operations;
1132 break;
1133 case S_IFLNK:
1134 /*
1135 * Must not load anything in the rbtree,
1136 * mpol_free_shared_policy will not be called.
1137 */
1138 mpol_shared_policy_init(&info->policy, NULL);
1139 break;
1140 }
1141 } else
1142 shmem_free_inode(sb);
1143 return inode;
1144}
1145
1146#ifdef CONFIG_TMPFS
1147static const struct inode_operations shmem_symlink_inode_operations;
1148static const struct inode_operations shmem_short_symlink_operations;
1149
1150static int
1151shmem_write_begin(struct file *file, struct address_space *mapping,
1152 loff_t pos, unsigned len, unsigned flags,
1153 struct page **pagep, void **fsdata)
1154{
1155 struct inode *inode = mapping->host;
1156 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1157 return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1158}
1159
1160static int
1161shmem_write_end(struct file *file, struct address_space *mapping,
1162 loff_t pos, unsigned len, unsigned copied,
1163 struct page *page, void *fsdata)
1164{
1165 struct inode *inode = mapping->host;
1166
1167 if (pos + copied > inode->i_size)
1168 i_size_write(inode, pos + copied);
1169
1170 set_page_dirty(page);
1171 unlock_page(page);
1172 page_cache_release(page);
1173
1174 return copied;
1175}
1176
1177static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1178{
1179 struct inode *inode = filp->f_path.dentry->d_inode;
1180 struct address_space *mapping = inode->i_mapping;
1181 pgoff_t index;
1182 unsigned long offset;
1183 enum sgp_type sgp = SGP_READ;
1184
1185 /*
1186 * Might this read be for a stacking filesystem? Then when reading
1187 * holes of a sparse file, we actually need to allocate those pages,
1188 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1189 */
1190 if (segment_eq(get_fs(), KERNEL_DS))
1191 sgp = SGP_DIRTY;
1192
1193 index = *ppos >> PAGE_CACHE_SHIFT;
1194 offset = *ppos & ~PAGE_CACHE_MASK;
1195
1196 for (;;) {
1197 struct page *page = NULL;
1198 pgoff_t end_index;
1199 unsigned long nr, ret;
1200 loff_t i_size = i_size_read(inode);
1201
1202 end_index = i_size >> PAGE_CACHE_SHIFT;
1203 if (index > end_index)
1204 break;
1205 if (index == end_index) {
1206 nr = i_size & ~PAGE_CACHE_MASK;
1207 if (nr <= offset)
1208 break;
1209 }
1210
1211 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1212 if (desc->error) {
1213 if (desc->error == -EINVAL)
1214 desc->error = 0;
1215 break;
1216 }
1217 if (page)
1218 unlock_page(page);
1219
1220 /*
1221 * We must evaluate after, since reads (unlike writes)
1222 * are called without i_mutex protection against truncate
1223 */
1224 nr = PAGE_CACHE_SIZE;
1225 i_size = i_size_read(inode);
1226 end_index = i_size >> PAGE_CACHE_SHIFT;
1227 if (index == end_index) {
1228 nr = i_size & ~PAGE_CACHE_MASK;
1229 if (nr <= offset) {
1230 if (page)
1231 page_cache_release(page);
1232 break;
1233 }
1234 }
1235 nr -= offset;
1236
1237 if (page) {
1238 /*
1239 * If users can be writing to this page using arbitrary
1240 * virtual addresses, take care about potential aliasing
1241 * before reading the page on the kernel side.
1242 */
1243 if (mapping_writably_mapped(mapping))
1244 flush_dcache_page(page);
1245 /*
1246 * Mark the page accessed if we read the beginning.
1247 */
1248 if (!offset)
1249 mark_page_accessed(page);
1250 } else {
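			/*
			 * SGP_READ leaves holes unallocated: copy such a
			 * hole out of the shared zero page instead.
			 */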
1251 page = ZERO_PAGE(0);
1252 page_cache_get(page);
1253 }
1254
1255 /*
1256 * Ok, we have the page, and it's up-to-date, so
1257 * now we can copy it to user space...
1258 *
1259 * The actor routine returns how many bytes were actually used..
1260 * NOTE! This may not be the same as how much of a user buffer
1261 * we filled up (we may be padding etc), so we can only update
1262 * "pos" here (the actor routine has to update the user buffer
1263 * pointers and the remaining count).
1264 */
1265 ret = actor(desc, page, offset, nr);
1266 offset += ret;
1267 index += offset >> PAGE_CACHE_SHIFT;
1268 offset &= ~PAGE_CACHE_MASK;
1269
1270 page_cache_release(page);
1271 if (ret != nr || !desc->count)
1272 break;
1273
1274 cond_resched();
1275 }
1276
1277 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1278 file_accessed(filp);
1279}
1280
1281static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1282 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1283{
1284 struct file *filp = iocb->ki_filp;
1285 ssize_t retval;
1286 unsigned long seg;
1287 size_t count;
1288 loff_t *ppos = &iocb->ki_pos;
1289
1290 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1291 if (retval)
1292 return retval;
1293
1294 for (seg = 0; seg < nr_segs; seg++) {
1295 read_descriptor_t desc;
1296
1297 desc.written = 0;
1298 desc.arg.buf = iov[seg].iov_base;
1299 desc.count = iov[seg].iov_len;
1300 if (desc.count == 0)
1301 continue;
1302 desc.error = 0;
1303 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1304 retval += desc.written;
1305 if (desc.error) {
1306 retval = retval ?: desc.error;
1307 break;
1308 }
1309 if (desc.count > 0)
1310 break;
1311 }
1312 return retval;
1313}
1314
1315static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1316 struct pipe_inode_info *pipe, size_t len,
1317 unsigned int flags)
1318{
1319 struct address_space *mapping = in->f_mapping;
1320 struct inode *inode = mapping->host;
1321 unsigned int loff, nr_pages, req_pages;
1322 struct page *pages[PIPE_DEF_BUFFERS];
1323 struct partial_page partial[PIPE_DEF_BUFFERS];
1324 struct page *page;
1325 pgoff_t index, end_index;
1326 loff_t isize, left;
1327 int error, page_nr;
1328 struct splice_pipe_desc spd = {
1329 .pages = pages,
1330 .partial = partial,
1331 .flags = flags,
1332 .ops = &page_cache_pipe_buf_ops,
1333 .spd_release = spd_release_page,
1334 };
1335
1336 isize = i_size_read(inode);
1337 if (unlikely(*ppos >= isize))
1338 return 0;
1339
1340 left = isize - *ppos;
1341 if (unlikely(left < len))
1342 len = left;
1343
1344 if (splice_grow_spd(pipe, &spd))
1345 return -ENOMEM;
1346
1347 index = *ppos >> PAGE_CACHE_SHIFT;
1348 loff = *ppos & ~PAGE_CACHE_MASK;
1349 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1350 nr_pages = min(req_pages, pipe->buffers);
1351
1352 spd.nr_pages = find_get_pages_contig(mapping, index,
1353 nr_pages, spd.pages);
1354 index += spd.nr_pages;
1355 error = 0;
1356
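	/*
	 * find_get_pages_contig() stops at the first slot it cannot hand back
	 * directly; fill the remainder with shmem_getpage() so every page
	 * passed to the pipe is present and uptodate.
	 */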
1357 while (spd.nr_pages < nr_pages) {
1358 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1359 if (error)
1360 break;
1361 unlock_page(page);
1362 spd.pages[spd.nr_pages++] = page;
1363 index++;
1364 }
1365
1366 index = *ppos >> PAGE_CACHE_SHIFT;
1367 nr_pages = spd.nr_pages;
1368 spd.nr_pages = 0;
1369
1370 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1371 unsigned int this_len;
1372
1373 if (!len)
1374 break;
1375
1376 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1377 page = spd.pages[page_nr];
1378
1379 if (!PageUptodate(page) || page->mapping != mapping) {
1380 error = shmem_getpage(inode, index, &page,
1381 SGP_CACHE, NULL);
1382 if (error)
1383 break;
1384 unlock_page(page);
1385 page_cache_release(spd.pages[page_nr]);
1386 spd.pages[page_nr] = page;
1387 }
1388
1389 isize = i_size_read(inode);
1390 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1391 if (unlikely(!isize || index > end_index))
1392 break;
1393
1394 if (end_index == index) {
1395 unsigned int plen;
1396
1397 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1398 if (plen <= loff)
1399 break;
1400
1401 this_len = min(this_len, plen - loff);
1402 len = this_len;
1403 }
1404
1405 spd.partial[page_nr].offset = loff;
1406 spd.partial[page_nr].len = this_len;
1407 len -= this_len;
1408 loff = 0;
1409 spd.nr_pages++;
1410 index++;
1411 }
1412
1413 while (page_nr < nr_pages)
1414 page_cache_release(spd.pages[page_nr++]);
1415
1416 if (spd.nr_pages)
1417 error = splice_to_pipe(pipe, &spd);
1418
1419 splice_shrink_spd(pipe, &spd);
1420
1421 if (error > 0) {
1422 *ppos += error;
1423 file_accessed(in);
1424 }
1425 return error;
1426}
1427
1428static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1429{
1430 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1431
1432 buf->f_type = TMPFS_MAGIC;
1433 buf->f_bsize = PAGE_CACHE_SIZE;
1434 buf->f_namelen = NAME_MAX;
1435 if (sbinfo->max_blocks) {
1436 buf->f_blocks = sbinfo->max_blocks;
1437 buf->f_bavail =
1438 buf->f_bfree = sbinfo->max_blocks -
1439 percpu_counter_sum(&sbinfo->used_blocks);
1440 }
1441 if (sbinfo->max_inodes) {
1442 buf->f_files = sbinfo->max_inodes;
1443 buf->f_ffree = sbinfo->free_inodes;
1444 }
1445 /* else leave those fields 0 like simple_statfs */
1446 return 0;
1447}
1448
1449/*
1450 * File creation. Allocate an inode, and we're done..
1451 */
1452static int
1453shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1454{
1455 struct inode *inode;
1456 int error = -ENOSPC;
1457
1458 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1459 if (inode) {
1460 error = security_inode_init_security(inode, dir,
1461 &dentry->d_name, NULL,
1462 NULL, NULL);
1463 if (error) {
1464 if (error != -EOPNOTSUPP) {
1465 iput(inode);
1466 return error;
1467 }
1468 }
1469#ifdef CONFIG_TMPFS_POSIX_ACL
1470 error = generic_acl_init(inode, dir);
1471 if (error) {
1472 iput(inode);
1473 return error;
1474 }
1475#else
1476 error = 0;
1477#endif
1478 dir->i_size += BOGO_DIRENT_SIZE;
1479 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1480 d_instantiate(dentry, inode);
1481 dget(dentry); /* Extra count - pin the dentry in core */
1482 }
1483 return error;
1484}
1485
1486static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1487{
1488 int error;
1489
1490 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1491 return error;
1492 inc_nlink(dir);
1493 return 0;
1494}
1495
1496static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1497 struct nameidata *nd)
1498{
1499 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1500}
1501
1502/*
1503 * Link a file..
1504 */
1505static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1506{
1507 struct inode *inode = old_dentry->d_inode;
1508 int ret;
1509
1510 /*
1511 * No ordinary (disk based) filesystem counts links as inodes;
1512 * but each new link needs a new dentry, pinning lowmem, and
1513 * tmpfs dentries cannot be pruned until they are unlinked.
1514 */
1515 ret = shmem_reserve_inode(inode->i_sb);
1516 if (ret)
1517 goto out;
1518
1519 dir->i_size += BOGO_DIRENT_SIZE;
1520 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1521 inc_nlink(inode);
1522 ihold(inode); /* New dentry reference */
1523 dget(dentry); /* Extra pinning count for the created dentry */
1524 d_instantiate(dentry, inode);
1525out:
1526 return ret;
1527}
1528
1529static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1530{
1531 struct inode *inode = dentry->d_inode;
1532
1533 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1534 shmem_free_inode(inode->i_sb);
1535
1536 dir->i_size -= BOGO_DIRENT_SIZE;
1537 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1538 drop_nlink(inode);
1539 dput(dentry); /* Undo the count from "create" - this does all the work */
1540 return 0;
1541}
1542
1543static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1544{
1545 if (!simple_empty(dentry))
1546 return -ENOTEMPTY;
1547
1548 drop_nlink(dentry->d_inode);
1549 drop_nlink(dir);
1550 return shmem_unlink(dir, dentry);
1551}
1552
1553/*
1554 * The VFS layer already does all the dentry stuff for rename,
1555 * we just have to decrement the usage count for the target if
1556 * it exists so that the VFS layer correctly frees it when it
1557 * gets overwritten.
1558 */
1559static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1560{
1561 struct inode *inode = old_dentry->d_inode;
1562 int they_are_dirs = S_ISDIR(inode->i_mode);
1563
1564 if (!simple_empty(new_dentry))
1565 return -ENOTEMPTY;
1566
1567 if (new_dentry->d_inode) {
1568 (void) shmem_unlink(new_dir, new_dentry);
1569 if (they_are_dirs)
1570 drop_nlink(old_dir);
1571 } else if (they_are_dirs) {
1572 drop_nlink(old_dir);
1573 inc_nlink(new_dir);
1574 }
1575
1576 old_dir->i_size -= BOGO_DIRENT_SIZE;
1577 new_dir->i_size += BOGO_DIRENT_SIZE;
1578 old_dir->i_ctime = old_dir->i_mtime =
1579 new_dir->i_ctime = new_dir->i_mtime =
1580 inode->i_ctime = CURRENT_TIME;
1581 return 0;
1582}
1583
1584static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1585{
1586 int error;
1587 int len;
1588 struct inode *inode;
1589 struct page *page;
1590 char *kaddr;
1591 struct shmem_inode_info *info;
1592
1593 len = strlen(symname) + 1;
1594 if (len > PAGE_CACHE_SIZE)
1595 return -ENAMETOOLONG;
1596
1597 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1598 if (!inode)
1599 return -ENOSPC;
1600
1601 error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
1602 NULL, NULL);
1603 if (error) {
1604 if (error != -EOPNOTSUPP) {
1605 iput(inode);
1606 return error;
1607 }
1608 error = 0;
1609 }
1610
1611 info = SHMEM_I(inode);
1612 inode->i_size = len-1;
1613 if (len <= SHORT_SYMLINK_LEN) {
1614 info->symlink = kmemdup(symname, len, GFP_KERNEL);
1615 if (!info->symlink) {
1616 iput(inode);
1617 return -ENOMEM;
1618 }
1619 inode->i_op = &shmem_short_symlink_operations;
1620 } else {
1621 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1622 if (error) {
1623 iput(inode);
1624 return error;
1625 }
1626 inode->i_mapping->a_ops = &shmem_aops;
1627 inode->i_op = &shmem_symlink_inode_operations;
1628 kaddr = kmap_atomic(page, KM_USER0);
1629 memcpy(kaddr, symname, len);
1630 kunmap_atomic(kaddr, KM_USER0);
1631 set_page_dirty(page);
1632 unlock_page(page);
1633 page_cache_release(page);
1634 }
1635 dir->i_size += BOGO_DIRENT_SIZE;
1636 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1637 d_instantiate(dentry, inode);
1638 dget(dentry);
1639 return 0;
1640}
1641
1642static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
1643{
1644 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
1645 return NULL;
1646}
1647
1648static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1649{
1650 struct page *page = NULL;
1651 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1652 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1653 if (page)
1654 unlock_page(page);
1655 return page;
1656}
1657
1658static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1659{
1660 if (!IS_ERR(nd_get_link(nd))) {
1661 struct page *page = cookie;
1662 kunmap(page);
1663 mark_page_accessed(page);
1664 page_cache_release(page);
1665 }
1666}
1667
1668#ifdef CONFIG_TMPFS_XATTR
1669/*
1670 * Superblocks without xattr inode operations may get some security.* xattr
1671 * support from the LSM "for free". As soon as we have any other xattrs
1672 * like ACLs, we also need to implement the security.* handlers at
1673 * filesystem level, though.
1674 */
1675
1676static int shmem_xattr_get(struct dentry *dentry, const char *name,
1677 void *buffer, size_t size)
1678{
1679 struct shmem_inode_info *info;
1680 struct shmem_xattr *xattr;
1681 int ret = -ENODATA;
1682
1683 info = SHMEM_I(dentry->d_inode);
1684
1685 spin_lock(&info->lock);
1686 list_for_each_entry(xattr, &info->xattr_list, list) {
1687 if (strcmp(name, xattr->name))
1688 continue;
1689
1690 ret = xattr->size;
1691 if (buffer) {
1692 if (size < xattr->size)
1693 ret = -ERANGE;
1694 else
1695 memcpy(buffer, xattr->value, xattr->size);
1696 }
1697 break;
1698 }
1699 spin_unlock(&info->lock);
1700 return ret;
1701}
1702
1703static int shmem_xattr_set(struct dentry *dentry, const char *name,
1704 const void *value, size_t size, int flags)
1705{
1706 struct inode *inode = dentry->d_inode;
1707 struct shmem_inode_info *info = SHMEM_I(inode);
1708 struct shmem_xattr *xattr;
1709 struct shmem_xattr *new_xattr = NULL;
1710 size_t len;
1711 int err = 0;
1712
1713 /* value == NULL means remove */
1714 if (value) {
1715 /* wrap around? */
1716 len = sizeof(*new_xattr) + size;
1717 if (len <= sizeof(*new_xattr))
1718 return -ENOMEM;
1719
1720 new_xattr = kmalloc(len, GFP_KERNEL);
1721 if (!new_xattr)
1722 return -ENOMEM;
1723
1724 new_xattr->name = kstrdup(name, GFP_KERNEL);
1725 if (!new_xattr->name) {
1726 kfree(new_xattr);
1727 return -ENOMEM;
1728 }
1729
1730 new_xattr->size = size;
1731 memcpy(new_xattr->value, value, size);
1732 }
1733
1734 spin_lock(&info->lock);
1735 list_for_each_entry(xattr, &info->xattr_list, list) {
1736 if (!strcmp(name, xattr->name)) {
1737 if (flags & XATTR_CREATE) {
1738 xattr = new_xattr;
1739 err = -EEXIST;
1740 } else if (new_xattr) {
1741 list_replace(&xattr->list, &new_xattr->list);
1742 } else {
1743 list_del(&xattr->list);
1744 }
1745 goto out;
1746 }
1747 }
1748 if (flags & XATTR_REPLACE) {
1749 xattr = new_xattr;
1750 err = -ENODATA;
1751 } else {
1752 list_add(&new_xattr->list, &info->xattr_list);
1753 xattr = NULL;
1754 }
1755out:
1756 spin_unlock(&info->lock);
1757 if (xattr)
1758 kfree(xattr->name);
1759 kfree(xattr);
1760 return err;
1761}
1762
1763static const struct xattr_handler *shmem_xattr_handlers[] = {
1764#ifdef CONFIG_TMPFS_POSIX_ACL
1765 &generic_acl_access_handler,
1766 &generic_acl_default_handler,
1767#endif
1768 NULL
1769};
1770
1771static int shmem_xattr_validate(const char *name)
1772{
1773 struct { const char *prefix; size_t len; } arr[] = {
1774 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1775 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1776 };
1777 int i;
1778
1779 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1780 size_t preflen = arr[i].len;
1781 if (strncmp(name, arr[i].prefix, preflen) == 0) {
1782 if (!name[preflen])
1783 return -EINVAL;
1784 return 0;
1785 }
1786 }
1787 return -EOPNOTSUPP;
1788}
1789
1790static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1791 void *buffer, size_t size)
1792{
1793 int err;
1794
1795 /*
1796 * If this is a request for a synthetic attribute in the system.*
1797 * namespace use the generic infrastructure to resolve a handler
1798 * for it via sb->s_xattr.
1799 */
1800 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1801 return generic_getxattr(dentry, name, buffer, size);
1802
1803 err = shmem_xattr_validate(name);
1804 if (err)
1805 return err;
1806
1807 return shmem_xattr_get(dentry, name, buffer, size);
1808}
1809
1810static int shmem_setxattr(struct dentry *dentry, const char *name,
1811 const void *value, size_t size, int flags)
1812{
1813 int err;
1814
1815 /*
1816 * If this is a request for a synthetic attribute in the system.*
1817 * namespace use the generic infrastructure to resolve a handler
1818 * for it via sb->s_xattr.
1819 */
1820 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1821 return generic_setxattr(dentry, name, value, size, flags);
1822
1823 err = shmem_xattr_validate(name);
1824 if (err)
1825 return err;
1826
1827 if (size == 0)
1828 value = ""; /* empty EA, do not remove */
1829
1830 return shmem_xattr_set(dentry, name, value, size, flags);
1831
1832}
1833
1834static int shmem_removexattr(struct dentry *dentry, const char *name)
1835{
1836 int err;
1837
1838 /*
1839 * If this is a request for a synthetic attribute in the system.*
1840 * namespace use the generic infrastructure to resolve a handler
1841 * for it via sb->s_xattr.
1842 */
1843 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1844 return generic_removexattr(dentry, name);
1845
1846 err = shmem_xattr_validate(name);
1847 if (err)
1848 return err;
1849
1850 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
1851}
1852
1853static bool xattr_is_trusted(const char *name)
1854{
1855 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
1856}
1857
1858static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
1859{
1860 bool trusted = capable(CAP_SYS_ADMIN);
1861 struct shmem_xattr *xattr;
1862 struct shmem_inode_info *info;
1863 size_t used = 0;
1864
1865 info = SHMEM_I(dentry->d_inode);
1866
1867 spin_lock(&info->lock);
1868 list_for_each_entry(xattr, &info->xattr_list, list) {
1869 size_t len;
1870
1871 /* skip "trusted." attributes for unprivileged callers */
1872 if (!trusted && xattr_is_trusted(xattr->name))
1873 continue;
1874
1875 len = strlen(xattr->name) + 1;
1876 used += len;
1877 if (buffer) {
1878 if (size < used) {
1879 used = -ERANGE;
1880 break;
1881 }
1882 memcpy(buffer, xattr->name, len);
1883 buffer += len;
1884 }
1885 }
1886 spin_unlock(&info->lock);
1887
1888 return used;
1889}
1890#endif /* CONFIG_TMPFS_XATTR */
1891
1892static const struct inode_operations shmem_short_symlink_operations = {
1893 .readlink = generic_readlink,
1894 .follow_link = shmem_follow_short_symlink,
1895#ifdef CONFIG_TMPFS_XATTR
1896 .setxattr = shmem_setxattr,
1897 .getxattr = shmem_getxattr,
1898 .listxattr = shmem_listxattr,
1899 .removexattr = shmem_removexattr,
1900#endif
1901};
1902
1903static const struct inode_operations shmem_symlink_inode_operations = {
1904 .readlink = generic_readlink,
1905 .follow_link = shmem_follow_link,
1906 .put_link = shmem_put_link,
1907#ifdef CONFIG_TMPFS_XATTR
1908 .setxattr = shmem_setxattr,
1909 .getxattr = shmem_getxattr,
1910 .listxattr = shmem_listxattr,
1911 .removexattr = shmem_removexattr,
1912#endif
1913};
1914
1915static struct dentry *shmem_get_parent(struct dentry *child)
1916{
1917 return ERR_PTR(-ESTALE);
1918}
1919
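/*
 * The NFS file handle is three 32-bit words: fh[0] is i_generation, fh[1]
 * the low and fh[2] the high 32 bits of i_ino.  shmem_match() and
 * shmem_fh_to_dentry() decode the same layout that shmem_encode_fh() packs.
 */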
1920static int shmem_match(struct inode *ino, void *vfh)
1921{
1922 __u32 *fh = vfh;
1923 __u64 inum = fh[2];
1924 inum = (inum << 32) | fh[1];
1925 return ino->i_ino == inum && fh[0] == ino->i_generation;
1926}
1927
1928static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1929 struct fid *fid, int fh_len, int fh_type)
1930{
1931 struct inode *inode;
1932 struct dentry *dentry = NULL;
1933 u64 inum = fid->raw[2];
1934 inum = (inum << 32) | fid->raw[1];
1935
1936 if (fh_len < 3)
1937 return NULL;
1938
1939 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1940 shmem_match, fid->raw);
1941 if (inode) {
1942 dentry = d_find_alias(inode);
1943 iput(inode);
1944 }
1945
1946 return dentry;
1947}
1948
1949static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
1950 int connectable)
1951{
1952 struct inode *inode = dentry->d_inode;
1953
1954 if (*len < 3) {
1955 *len = 3;
1956 return 255;
1957 }
1958
1959 if (inode_unhashed(inode)) {
1960 /* Unfortunately insert_inode_hash is not idempotent,
1961 * so as we hash inodes here rather than at creation
1962 * time, we need a lock to ensure we only try
1963 * to do it once
1964 */
1965 static DEFINE_SPINLOCK(lock);
1966 spin_lock(&lock);
1967 if (inode_unhashed(inode))
1968 __insert_inode_hash(inode,
1969 inode->i_ino + inode->i_generation);
1970 spin_unlock(&lock);
1971 }
1972
1973 fh[0] = inode->i_generation;
1974 fh[1] = inode->i_ino;
1975 fh[2] = ((__u64)inode->i_ino) >> 32;
1976
1977 *len = 3;
1978 return 1;
1979}
1980
1981static const struct export_operations shmem_export_ops = {
1982 .get_parent = shmem_get_parent,
1983 .encode_fh = shmem_encode_fh,
1984 .fh_to_dentry = shmem_fh_to_dentry,
1985};
1986
1987static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
1988 bool remount)
1989{
1990 char *this_char, *value, *rest;
1991
1992 while (options != NULL) {
1993 this_char = options;
1994 for (;;) {
1995 /*
1996 * NUL-terminate this option: unfortunately,
1997 * mount options form a comma-separated list,
1998 * but mpol's nodelist may also contain commas.
1999 */
2000 options = strchr(options, ',');
2001 if (options == NULL)
2002 break;
2003 options++;
2004 if (!isdigit(*options)) {
2005 options[-1] = '\0';
2006 break;
2007 }
2008 }
2009 if (!*this_char)
2010 continue;
2011 if ((value = strchr(this_char,'=')) != NULL) {
2012 *value++ = 0;
2013 } else {
2014 printk(KERN_ERR
2015 "tmpfs: No value for mount option '%s'\n",
2016 this_char);
2017 return 1;
2018 }
2019
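		/*
		 * "size" accepts a byte count with an optional k/m/g suffix
		 * (via memparse), or a number followed by '%' meaning that
		 * percentage of totalram_pages.
		 */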
2020 if (!strcmp(this_char,"size")) {
2021 unsigned long long size;
2022 size = memparse(value,&rest);
2023 if (*rest == '%') {
2024 size <<= PAGE_SHIFT;
2025 size *= totalram_pages;
2026 do_div(size, 100);
2027 rest++;
2028 }
2029 if (*rest)
2030 goto bad_val;
2031 sbinfo->max_blocks =
2032 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2033 } else if (!strcmp(this_char,"nr_blocks")) {
2034 sbinfo->max_blocks = memparse(value, &rest);
2035 if (*rest)
2036 goto bad_val;
2037 } else if (!strcmp(this_char,"nr_inodes")) {
2038 sbinfo->max_inodes = memparse(value, &rest);
2039 if (*rest)
2040 goto bad_val;
2041 } else if (!strcmp(this_char,"mode")) {
2042 if (remount)
2043 continue;
2044 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2045 if (*rest)
2046 goto bad_val;
2047 } else if (!strcmp(this_char,"uid")) {
2048 if (remount)
2049 continue;
2050 sbinfo->uid = simple_strtoul(value, &rest, 0);
2051 if (*rest)
2052 goto bad_val;
2053 } else if (!strcmp(this_char,"gid")) {
2054 if (remount)
2055 continue;
2056 sbinfo->gid = simple_strtoul(value, &rest, 0);
2057 if (*rest)
2058 goto bad_val;
2059 } else if (!strcmp(this_char,"mpol")) {
2060 if (mpol_parse_str(value, &sbinfo->mpol, 1))
2061 goto bad_val;
2062 } else {
2063 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2064 this_char);
2065 return 1;
2066 }
2067 }
2068 return 0;
2069
2070bad_val:
2071 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2072 value, this_char);
2073 return 1;
2074
2075}
2076
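/*
 * Apply new mount options to a live tmpfs.  Refuse to shrink the limits
 * below what is already in use, and refuse to turn an unlimited mount into
 * a limited one, since nothing has been tracked for it so far.
 */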
2077static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2078{
2079 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2080 struct shmem_sb_info config = *sbinfo;
2081 unsigned long inodes;
2082 int error = -EINVAL;
2083
2084 if (shmem_parse_options(data, &config, true))
2085 return error;
2086
2087 spin_lock(&sbinfo->stat_lock);
2088 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2089 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2090 goto out;
2091 if (config.max_inodes < inodes)
2092 goto out;
2093 /*
2094 * Those tests disallow limited->unlimited while any are in use;
2095 * but we must separately disallow unlimited->limited, because
2096 * in that case we have no record of how much is already in use.
2097 */
2098 if (config.max_blocks && !sbinfo->max_blocks)
2099 goto out;
2100 if (config.max_inodes && !sbinfo->max_inodes)
2101 goto out;
2102
2103 error = 0;
2104 sbinfo->max_blocks = config.max_blocks;
2105 sbinfo->max_inodes = config.max_inodes;
2106 sbinfo->free_inodes = config.max_inodes - inodes;
2107
2108 mpol_put(sbinfo->mpol);
2109 sbinfo->mpol = config.mpol; /* transfers initial ref */
2110out:
2111 spin_unlock(&sbinfo->stat_lock);
2112 return error;
2113}
2114
2115static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2116{
2117 struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2118
2119 if (sbinfo->max_blocks != shmem_default_max_blocks())
2120 seq_printf(seq, ",size=%luk",
2121 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2122 if (sbinfo->max_inodes != shmem_default_max_inodes())
2123 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2124 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2125 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2126 if (sbinfo->uid != 0)
2127 seq_printf(seq, ",uid=%u", sbinfo->uid);
2128 if (sbinfo->gid != 0)
2129 seq_printf(seq, ",gid=%u", sbinfo->gid);
2130 shmem_show_mpol(seq, sbinfo->mpol);
2131 return 0;
2132}
2133#endif /* CONFIG_TMPFS */
2134
2135static void shmem_put_super(struct super_block *sb)
2136{
2137 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2138
2139 percpu_counter_destroy(&sbinfo->used_blocks);
2140 kfree(sbinfo);
2141 sb->s_fs_info = NULL;
2142}
2143
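/*
 * Fill in the tmpfs superblock: allocate sbinfo, apply the mount options
 * (the internal kernel mount is left unlimited), and allocate the root
 * directory inode.
 */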
2144int shmem_fill_super(struct super_block *sb, void *data, int silent)
2145{
2146 struct inode *inode;
2147 struct dentry *root;
2148 struct shmem_sb_info *sbinfo;
2149 int err = -ENOMEM;
2150
2151 /* Round up to L1_CACHE_BYTES to resist false sharing */
2152 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2153 L1_CACHE_BYTES), GFP_KERNEL);
2154 if (!sbinfo)
2155 return -ENOMEM;
2156
2157 sbinfo->mode = S_IRWXUGO | S_ISVTX;
2158 sbinfo->uid = current_fsuid();
2159 sbinfo->gid = current_fsgid();
2160 sb->s_fs_info = sbinfo;
2161
2162#ifdef CONFIG_TMPFS
2163 /*
2164	 * By default we allow only half of the physical RAM per
2165 * tmpfs instance, limiting inodes to one per page of lowmem;
2166 * but the internal instance is left unlimited.
2167 */
2168 if (!(sb->s_flags & MS_NOUSER)) {
2169 sbinfo->max_blocks = shmem_default_max_blocks();
2170 sbinfo->max_inodes = shmem_default_max_inodes();
2171 if (shmem_parse_options(data, sbinfo, false)) {
2172 err = -EINVAL;
2173 goto failed;
2174 }
2175 }
2176 sb->s_export_op = &shmem_export_ops;
2177#else
2178 sb->s_flags |= MS_NOUSER;
2179#endif
2180
2181 spin_lock_init(&sbinfo->stat_lock);
2182 if (percpu_counter_init(&sbinfo->used_blocks, 0))
2183 goto failed;
2184 sbinfo->free_inodes = sbinfo->max_inodes;
2185
2186 sb->s_maxbytes = MAX_LFS_FILESIZE;
2187 sb->s_blocksize = PAGE_CACHE_SIZE;
2188 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2189 sb->s_magic = TMPFS_MAGIC;
2190 sb->s_op = &shmem_ops;
2191 sb->s_time_gran = 1;
2192#ifdef CONFIG_TMPFS_XATTR
2193 sb->s_xattr = shmem_xattr_handlers;
2194#endif
2195#ifdef CONFIG_TMPFS_POSIX_ACL
2196 sb->s_flags |= MS_POSIXACL;
2197#endif
2198
2199 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2200 if (!inode)
2201 goto failed;
2202 inode->i_uid = sbinfo->uid;
2203 inode->i_gid = sbinfo->gid;
2204 root = d_alloc_root(inode);
2205 if (!root)
2206 goto failed_iput;
2207 sb->s_root = root;
2208 return 0;
2209
2210failed_iput:
2211 iput(inode);
2212failed:
2213 shmem_put_super(sb);
2214 return err;
2215}
2216
2217static struct kmem_cache *shmem_inode_cachep;
2218
2219static struct inode *shmem_alloc_inode(struct super_block *sb)
2220{
2221 struct shmem_inode_info *info;
2222 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2223 if (!info)
2224 return NULL;
2225 return &info->vfs_inode;
2226}
2227
2228static void shmem_destroy_callback(struct rcu_head *head)
2229{
2230 struct inode *inode = container_of(head, struct inode, i_rcu);
2231 INIT_LIST_HEAD(&inode->i_dentry);
2232 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2233}
2234
2235static void shmem_destroy_inode(struct inode *inode)
2236{
2237 if ((inode->i_mode & S_IFMT) == S_IFREG)
2238 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2239 call_rcu(&inode->i_rcu, shmem_destroy_callback);
2240}
2241
2242static void shmem_init_inode(void *foo)
2243{
2244 struct shmem_inode_info *info = foo;
2245 inode_init_once(&info->vfs_inode);
2246}
2247
2248static int shmem_init_inodecache(void)
2249{
2250 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2251 sizeof(struct shmem_inode_info),
2252 0, SLAB_PANIC, shmem_init_inode);
2253 return 0;
2254}
2255
2256static void shmem_destroy_inodecache(void)
2257{
2258 kmem_cache_destroy(shmem_inode_cachep);
2259}
2260
2261static const struct address_space_operations shmem_aops = {
2262 .writepage = shmem_writepage,
2263 .set_page_dirty = __set_page_dirty_no_writeback,
2264#ifdef CONFIG_TMPFS
2265 .write_begin = shmem_write_begin,
2266 .write_end = shmem_write_end,
2267#endif
2268 .migratepage = migrate_page,
2269 .error_remove_page = generic_error_remove_page,
2270};
2271
2272static const struct file_operations shmem_file_operations = {
2273 .mmap = shmem_mmap,
2274#ifdef CONFIG_TMPFS
2275 .llseek = generic_file_llseek,
2276 .read = do_sync_read,
2277 .write = do_sync_write,
2278 .aio_read = shmem_file_aio_read,
2279 .aio_write = generic_file_aio_write,
2280 .fsync = noop_fsync,
2281 .splice_read = shmem_file_splice_read,
2282 .splice_write = generic_file_splice_write,
2283#endif
2284};
2285
2286static const struct inode_operations shmem_inode_operations = {
2287 .setattr = shmem_setattr,
2288 .truncate_range = shmem_truncate_range,
2289#ifdef CONFIG_TMPFS_XATTR
2290 .setxattr = shmem_setxattr,
2291 .getxattr = shmem_getxattr,
2292 .listxattr = shmem_listxattr,
2293 .removexattr = shmem_removexattr,
2294#endif
2295};
2296
2297static const struct inode_operations shmem_dir_inode_operations = {
2298#ifdef CONFIG_TMPFS
2299 .create = shmem_create,
2300 .lookup = simple_lookup,
2301 .link = shmem_link,
2302 .unlink = shmem_unlink,
2303 .symlink = shmem_symlink,
2304 .mkdir = shmem_mkdir,
2305 .rmdir = shmem_rmdir,
2306 .mknod = shmem_mknod,
2307 .rename = shmem_rename,
2308#endif
2309#ifdef CONFIG_TMPFS_XATTR
2310 .setxattr = shmem_setxattr,
2311 .getxattr = shmem_getxattr,
2312 .listxattr = shmem_listxattr,
2313 .removexattr = shmem_removexattr,
2314#endif
2315#ifdef CONFIG_TMPFS_POSIX_ACL
2316 .setattr = shmem_setattr,
2317#endif
2318};
2319
2320static const struct inode_operations shmem_special_inode_operations = {
2321#ifdef CONFIG_TMPFS_XATTR
2322 .setxattr = shmem_setxattr,
2323 .getxattr = shmem_getxattr,
2324 .listxattr = shmem_listxattr,
2325 .removexattr = shmem_removexattr,
2326#endif
2327#ifdef CONFIG_TMPFS_POSIX_ACL
2328 .setattr = shmem_setattr,
2329#endif
2330};
2331
2332static const struct super_operations shmem_ops = {
2333 .alloc_inode = shmem_alloc_inode,
2334 .destroy_inode = shmem_destroy_inode,
2335#ifdef CONFIG_TMPFS
2336 .statfs = shmem_statfs,
2337 .remount_fs = shmem_remount_fs,
2338 .show_options = shmem_show_options,
2339#endif
2340 .evict_inode = shmem_evict_inode,
2341 .drop_inode = generic_delete_inode,
2342 .put_super = shmem_put_super,
2343};
2344
2345static const struct vm_operations_struct shmem_vm_ops = {
2346 .fault = shmem_fault,
2347#ifdef CONFIG_NUMA
2348 .set_policy = shmem_set_policy,
2349 .get_policy = shmem_get_policy,
2350#endif
2351};
2352
2353static struct dentry *shmem_mount(struct file_system_type *fs_type,
2354 int flags, const char *dev_name, void *data)
2355{
2356 return mount_nodev(fs_type, flags, data, shmem_fill_super);
2357}
2358
2359static struct file_system_type shmem_fs_type = {
2360 .owner = THIS_MODULE,
2361 .name = "tmpfs",
2362 .mount = shmem_mount,
2363 .kill_sb = kill_litter_super,
2364};
2365
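/*
 * Boot-time initialization: set up the backing_dev_info and the inode
 * cache, register the tmpfs filesystem type, and create the internal
 * kernel mount from which shmem_file_setup() allocates unlinked files.
 */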
2366int __init shmem_init(void)
2367{
2368 int error;
2369
2370 error = bdi_init(&shmem_backing_dev_info);
2371 if (error)
2372 goto out4;
2373
2374 error = shmem_init_inodecache();
2375 if (error)
2376 goto out3;
2377
2378 error = register_filesystem(&shmem_fs_type);
2379 if (error) {
2380 printk(KERN_ERR "Could not register tmpfs\n");
2381 goto out2;
2382 }
2383
2384 shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2385 shmem_fs_type.name, NULL);
2386 if (IS_ERR(shm_mnt)) {
2387 error = PTR_ERR(shm_mnt);
2388 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2389 goto out1;
2390 }
2391 return 0;
2392
2393out1:
2394 unregister_filesystem(&shmem_fs_type);
2395out2:
2396 shmem_destroy_inodecache();
2397out3:
2398 bdi_destroy(&shmem_backing_dev_info);
2399out4:
2400 shm_mnt = ERR_PTR(error);
2401 return error;
2402}
2403
2404#else /* !CONFIG_SHMEM */
2405
2406/*
2407 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2408 *
2409 * This is intended for small systems where the benefits of the full
2410 * shmem code (swap-backed and resource-limited) are outweighed by
2411 * its complexity. On systems without swap this code should be
2412 * effectively equivalent, but much lighter weight.
2413 */
2414
2415#include <linux/ramfs.h>
2416
2417static struct file_system_type shmem_fs_type = {
2418 .name = "tmpfs",
2419 .mount = ramfs_mount,
2420 .kill_sb = kill_litter_super,
2421};
2422
2423int __init shmem_init(void)
2424{
2425 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2426
2427 shm_mnt = kern_mount(&shmem_fs_type);
2428 BUG_ON(IS_ERR(shm_mnt));
2429
2430 return 0;
2431}
2432
2433int shmem_unuse(swp_entry_t swap, struct page *page)
2434{
2435 return 0;
2436}
2437
2438int shmem_lock(struct file *file, int lock, struct user_struct *user)
2439{
2440 return 0;
2441}
2442
2443void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2444{
2445 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2446}
2447EXPORT_SYMBOL_GPL(shmem_truncate_range);
2448
2449#define shmem_vm_ops generic_file_vm_ops
2450#define shmem_file_operations ramfs_file_operations
2451#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
2452#define shmem_acct_size(flags, size) 0
2453#define shmem_unacct_size(flags, size) do {} while (0)
2454
2455#endif /* CONFIG_SHMEM */
2456
2457/* common code */
2458
2459/**
2460 * shmem_file_setup - get an unlinked file living in tmpfs
2461 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2462 * @size: size to be set for the file
2463 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2464 */
2465struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2466{
2467 int error;
2468 struct file *file;
2469 struct inode *inode;
2470 struct path path;
2471 struct dentry *root;
2472 struct qstr this;
2473
2474 if (IS_ERR(shm_mnt))
2475		return ERR_CAST(shm_mnt);
2476
2477 if (size < 0 || size > MAX_LFS_FILESIZE)
2478 return ERR_PTR(-EINVAL);
2479
2480 if (shmem_acct_size(flags, size))
2481 return ERR_PTR(-ENOMEM);
2482
2483 error = -ENOMEM;
2484 this.name = name;
2485 this.len = strlen(name);
2486 this.hash = 0; /* will go */
2487 root = shm_mnt->mnt_root;
2488 path.dentry = d_alloc(root, &this);
2489 if (!path.dentry)
2490 goto put_memory;
2491 path.mnt = mntget(shm_mnt);
2492
2493 error = -ENOSPC;
2494 inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2495 if (!inode)
2496 goto put_dentry;
2497
2498 d_instantiate(path.dentry, inode);
2499 inode->i_size = size;
2500 inode->i_nlink = 0; /* It is unlinked */
2501#ifndef CONFIG_MMU
2502 error = ramfs_nommu_expand_for_mapping(inode, size);
2503 if (error)
2504 goto put_dentry;
2505#endif
2506
2507 error = -ENFILE;
2508 file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2509 &shmem_file_operations);
2510 if (!file)
2511 goto put_dentry;
2512
2513 return file;
2514
2515put_dentry:
2516 path_put(&path);
2517put_memory:
2518 shmem_unacct_size(flags, size);
2519 return ERR_PTR(error);
2520}
2521EXPORT_SYMBOL_GPL(shmem_file_setup);
2522
2523/**
2524 * shmem_zero_setup - setup a shared anonymous mapping
2525 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2526 */
2527int shmem_zero_setup(struct vm_area_struct *vma)
2528{
2529 struct file *file;
2530 loff_t size = vma->vm_end - vma->vm_start;
2531
2532 file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2533 if (IS_ERR(file))
2534 return PTR_ERR(file);
2535
2536 if (vma->vm_file)
2537 fput(vma->vm_file);
2538 vma->vm_file = file;
2539 vma->vm_ops = &shmem_vm_ops;
2540 vma->vm_flags |= VM_CAN_NONLINEAR;
2541 return 0;
2542}
2543
2544/**
2545 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2546 * @mapping: the page's address_space
2547 * @index: the page index
2548 * @gfp: the page allocator flags to use if allocating
2549 *
2550 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2551 * with any new page allocations done using the specified allocation flags.
2552 * But read_cache_page_gfp() uses the ->readpage() method, which does not
2553 * suit tmpfs, since it may have pages in swapcache, and needs to find those
2554 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2555 *
2556 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2557 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2558 */
2559struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2560 pgoff_t index, gfp_t gfp)
2561{
2562#ifdef CONFIG_SHMEM
2563 struct inode *inode = mapping->host;
2564 struct page *page;
2565 int error;
2566
2567 BUG_ON(mapping->a_ops != &shmem_aops);
2568 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2569 if (error)
2570 page = ERR_PTR(error);
2571 else
2572 unlock_page(page);
2573 return page;
2574#else
2575 /*
2576 * The tiny !SHMEM case uses ramfs without swap
2577 */
2578 return read_cache_page_gfp(mapping, index, gfp);
2579#endif
2580}
2581EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/ramfs.h>
29#include <linux/pagemap.h>
30#include <linux/file.h>
31#include <linux/mm.h>
32#include <linux/random.h>
33#include <linux/sched/signal.h>
34#include <linux/export.h>
35#include <linux/swap.h>
36#include <linux/uio.h>
37#include <linux/khugepaged.h>
38#include <linux/hugetlb.h>
39#include <linux/frontswap.h>
40#include <linux/fs_parser.h>
41
42#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
43
44static struct vfsmount *shm_mnt;
45
46#ifdef CONFIG_SHMEM
47/*
48 * This virtual memory filesystem is heavily based on the ramfs. It
49 * extends ramfs by the ability to use swap and honor resource limits
50 * which makes it a completely usable filesystem.
51 */
52
53#include <linux/xattr.h>
54#include <linux/exportfs.h>
55#include <linux/posix_acl.h>
56#include <linux/posix_acl_xattr.h>
57#include <linux/mman.h>
58#include <linux/string.h>
59#include <linux/slab.h>
60#include <linux/backing-dev.h>
61#include <linux/shmem_fs.h>
62#include <linux/writeback.h>
63#include <linux/blkdev.h>
64#include <linux/pagevec.h>
65#include <linux/percpu_counter.h>
66#include <linux/falloc.h>
67#include <linux/splice.h>
68#include <linux/security.h>
69#include <linux/swapops.h>
70#include <linux/mempolicy.h>
71#include <linux/namei.h>
72#include <linux/ctype.h>
73#include <linux/migrate.h>
74#include <linux/highmem.h>
75#include <linux/seq_file.h>
76#include <linux/magic.h>
77#include <linux/syscalls.h>
78#include <linux/fcntl.h>
79#include <uapi/linux/memfd.h>
80#include <linux/userfaultfd_k.h>
81#include <linux/rmap.h>
82#include <linux/uuid.h>
83
84#include <linux/uaccess.h>
85
86#include "internal.h"
87
88#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
89#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
90
91/* Pretend that each entry is of this size in directory's i_size */
92#define BOGO_DIRENT_SIZE 20
93
94/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
95#define SHORT_SYMLINK_LEN 128
96
97/*
98 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99 * inode->i_private (with i_mutex making sure that it has only one user at
100 * a time): we would prefer not to enlarge the shmem inode just for that.
101 */
102struct shmem_falloc {
103 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
104 pgoff_t start; /* start of range currently being fallocated */
105 pgoff_t next; /* the next page offset to be fallocated */
106 pgoff_t nr_falloced; /* how many new pages have been fallocated */
107 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
108};
109
110struct shmem_options {
111 unsigned long long blocks;
112 unsigned long long inodes;
113 struct mempolicy *mpol;
114 kuid_t uid;
115 kgid_t gid;
116 umode_t mode;
117 bool full_inums;
118 int huge;
119 int seen;
120#define SHMEM_SEEN_BLOCKS 1
121#define SHMEM_SEEN_INODES 2
122#define SHMEM_SEEN_HUGE 4
123#define SHMEM_SEEN_INUMS 8
124};
125
126#ifdef CONFIG_TMPFS
127static unsigned long shmem_default_max_blocks(void)
128{
129 return totalram_pages() / 2;
130}
131
132static unsigned long shmem_default_max_inodes(void)
133{
134 unsigned long nr_pages = totalram_pages();
135
136 return min(nr_pages - totalhigh_pages(), nr_pages / 2);
137}
138#endif
139
140static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141static int shmem_replace_page(struct page **pagep, gfp_t gfp,
142 struct shmem_inode_info *info, pgoff_t index);
143static int shmem_swapin_page(struct inode *inode, pgoff_t index,
144 struct page **pagep, enum sgp_type sgp,
145 gfp_t gfp, struct vm_area_struct *vma,
146 vm_fault_t *fault_type);
147static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
148 struct page **pagep, enum sgp_type sgp,
149 gfp_t gfp, struct vm_area_struct *vma,
150 struct vm_fault *vmf, vm_fault_t *fault_type);
151
152int shmem_getpage(struct inode *inode, pgoff_t index,
153 struct page **pagep, enum sgp_type sgp)
154{
155 return shmem_getpage_gfp(inode, index, pagep, sgp,
156 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
157}
158
159static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
160{
161 return sb->s_fs_info;
162}
163
164/*
165 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
166 * for shared memory and for shared anonymous (/dev/zero) mappings
167 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
168 * consistent with the pre-accounting of private mappings ...
169 */
170static inline int shmem_acct_size(unsigned long flags, loff_t size)
171{
172 return (flags & VM_NORESERVE) ?
173 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
174}
175
176static inline void shmem_unacct_size(unsigned long flags, loff_t size)
177{
178 if (!(flags & VM_NORESERVE))
179 vm_unacct_memory(VM_ACCT(size));
180}
181
182static inline int shmem_reacct_size(unsigned long flags,
183 loff_t oldsize, loff_t newsize)
184{
185 if (!(flags & VM_NORESERVE)) {
186 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
187 return security_vm_enough_memory_mm(current->mm,
188 VM_ACCT(newsize) - VM_ACCT(oldsize));
189 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
190 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
191 }
192 return 0;
193}
194
195/*
196 * ... whereas tmpfs objects are accounted incrementally as
197 * pages are allocated, in order to allow large sparse files.
198 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
199 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
200 */
201static inline int shmem_acct_block(unsigned long flags, long pages)
202{
203 if (!(flags & VM_NORESERVE))
204 return 0;
205
206 return security_vm_enough_memory_mm(current->mm,
207 pages * VM_ACCT(PAGE_SIZE));
208}
209
210static inline void shmem_unacct_blocks(unsigned long flags, long pages)
211{
212 if (flags & VM_NORESERVE)
213 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
214}
215
216static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
217{
218 struct shmem_inode_info *info = SHMEM_I(inode);
219 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
220
221 if (shmem_acct_block(info->flags, pages))
222 return false;
223
224 if (sbinfo->max_blocks) {
225 if (percpu_counter_compare(&sbinfo->used_blocks,
226 sbinfo->max_blocks - pages) > 0)
227 goto unacct;
228 percpu_counter_add(&sbinfo->used_blocks, pages);
229 }
230
231 return true;
232
233unacct:
234 shmem_unacct_blocks(info->flags, pages);
235 return false;
236}
237
238static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
239{
240 struct shmem_inode_info *info = SHMEM_I(inode);
241 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
242
243 if (sbinfo->max_blocks)
244 percpu_counter_sub(&sbinfo->used_blocks, pages);
245 shmem_unacct_blocks(info->flags, pages);
246}
247
248static const struct super_operations shmem_ops;
249const struct address_space_operations shmem_aops;
250static const struct file_operations shmem_file_operations;
251static const struct inode_operations shmem_inode_operations;
252static const struct inode_operations shmem_dir_inode_operations;
253static const struct inode_operations shmem_special_inode_operations;
254static const struct vm_operations_struct shmem_vm_ops;
255static struct file_system_type shmem_fs_type;
256
257bool vma_is_shmem(struct vm_area_struct *vma)
258{
259 return vma->vm_ops == &shmem_vm_ops;
260}
261
262static LIST_HEAD(shmem_swaplist);
263static DEFINE_MUTEX(shmem_swaplist_mutex);
264
265/*
266 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
267 * produces a novel ino for the newly allocated inode.
268 *
269 * It may also be called when making a hard link to permit the space needed by
270 * each dentry. However, in that case, no new inode number is needed since that
271 * internally draws from another pool of inode numbers (currently global
272 * get_next_ino()). This case is indicated by passing NULL as inop.
273 */
274#define SHMEM_INO_BATCH 1024
275static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
276{
277 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
278 ino_t ino;
279
280 if (!(sb->s_flags & SB_KERNMOUNT)) {
281 spin_lock(&sbinfo->stat_lock);
282 if (sbinfo->max_inodes) {
283 if (!sbinfo->free_inodes) {
284 spin_unlock(&sbinfo->stat_lock);
285 return -ENOSPC;
286 }
287 sbinfo->free_inodes--;
288 }
289 if (inop) {
290 ino = sbinfo->next_ino++;
291 if (unlikely(is_zero_ino(ino)))
292 ino = sbinfo->next_ino++;
293 if (unlikely(!sbinfo->full_inums &&
294 ino > UINT_MAX)) {
295 /*
296 * Emulate get_next_ino uint wraparound for
297 * compatibility
298 */
299 if (IS_ENABLED(CONFIG_64BIT))
300 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
301 __func__, MINOR(sb->s_dev));
302 sbinfo->next_ino = 1;
303 ino = sbinfo->next_ino++;
304 }
305 *inop = ino;
306 }
307 spin_unlock(&sbinfo->stat_lock);
308 } else if (inop) {
309 /*
310 * __shmem_file_setup, one of our callers, is lock-free: it
311 * doesn't hold stat_lock in shmem_reserve_inode since
312 * max_inodes is always 0, and is called from potentially
313 * unknown contexts. As such, use a per-cpu batched allocator
314 * which doesn't require the per-sb stat_lock unless we are at
315 * the batch boundary.
316 *
317 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
318 * shmem mounts are not exposed to userspace, so we don't need
319 * to worry about things like glibc compatibility.
320 */
321 ino_t *next_ino;
322 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
323 ino = *next_ino;
324 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
325 spin_lock(&sbinfo->stat_lock);
326 ino = sbinfo->next_ino;
327 sbinfo->next_ino += SHMEM_INO_BATCH;
328 spin_unlock(&sbinfo->stat_lock);
329 if (unlikely(is_zero_ino(ino)))
330 ino++;
331 }
332 *inop = ino;
333 *next_ino = ++ino;
334 put_cpu();
335 }
336
337 return 0;
338}
339
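/*
 * Return one reserved inode to the superblock's free count (only tracked
 * when the mount has an inode limit).
 */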
340static void shmem_free_inode(struct super_block *sb)
341{
342 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
343 if (sbinfo->max_inodes) {
344 spin_lock(&sbinfo->stat_lock);
345 sbinfo->free_inodes++;
346 spin_unlock(&sbinfo->stat_lock);
347 }
348}
349
350/**
351 * shmem_recalc_inode - recalculate the block usage of an inode
352 * @inode: inode to recalc
353 *
354 * We have to calculate the free blocks since the mm can drop
355 * undirtied hole pages behind our back.
356 *
357 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
358 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
359 *
360 * It has to be called with the spinlock held.
361 */
362static void shmem_recalc_inode(struct inode *inode)
363{
364 struct shmem_inode_info *info = SHMEM_I(inode);
365 long freed;
366
367 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
368 if (freed > 0) {
369 info->alloced -= freed;
370 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
371 shmem_inode_unacct_blocks(inode, freed);
372 }
373}
374
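/*
 * Charge @pages newly added to the inode's page cache: account the blocks,
 * then update alloced and i_blocks under info->lock.  Returns false, with
 * nothing charged, if block accounting fails.
 */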
375bool shmem_charge(struct inode *inode, long pages)
376{
377 struct shmem_inode_info *info = SHMEM_I(inode);
378 unsigned long flags;
379
380 if (!shmem_inode_acct_block(inode, pages))
381 return false;
382
383 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
384 inode->i_mapping->nrpages += pages;
385
386 spin_lock_irqsave(&info->lock, flags);
387 info->alloced += pages;
388 inode->i_blocks += pages * BLOCKS_PER_PAGE;
389 shmem_recalc_inode(inode);
390 spin_unlock_irqrestore(&info->lock, flags);
391
392 return true;
393}
394
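/*
 * Undo shmem_charge() for @pages already removed from the page cache:
 * update the counters and release the block accounting.
 */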
395void shmem_uncharge(struct inode *inode, long pages)
396{
397 struct shmem_inode_info *info = SHMEM_I(inode);
398 unsigned long flags;
399
400 /* nrpages adjustment done by __delete_from_page_cache() or caller */
401
402 spin_lock_irqsave(&info->lock, flags);
403 info->alloced -= pages;
404 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
405 shmem_recalc_inode(inode);
406 spin_unlock_irqrestore(&info->lock, flags);
407
408 shmem_inode_unacct_blocks(inode, pages);
409}
410
411/*
412 * Replace item expected in xarray by a new item, while holding xa_lock.
413 */
414static int shmem_replace_entry(struct address_space *mapping,
415 pgoff_t index, void *expected, void *replacement)
416{
417 XA_STATE(xas, &mapping->i_pages, index);
418 void *item;
419
420 VM_BUG_ON(!expected);
421 VM_BUG_ON(!replacement);
422 item = xas_load(&xas);
423 if (item != expected)
424 return -ENOENT;
425 xas_store(&xas, replacement);
426 return 0;
427}
428
429/*
430 * Sometimes, before we decide whether to proceed or to fail, we must check
431 * that an entry was not already brought back from swap by a racing thread.
432 *
433 * Checking page is not enough: by the time a SwapCache page is locked, it
434 * might be reused, and again be SwapCache, using the same swap as before.
435 */
436static bool shmem_confirm_swap(struct address_space *mapping,
437 pgoff_t index, swp_entry_t swap)
438{
439 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
440}
441
442/*
443 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
444 *
445 * SHMEM_HUGE_NEVER:
446 * disables huge pages for the mount;
447 * SHMEM_HUGE_ALWAYS:
448 * enables huge pages for the mount;
449 * SHMEM_HUGE_WITHIN_SIZE:
450 * only allocate huge pages if the page will be fully within i_size,
451 * also respect fadvise()/madvise() hints;
452 * SHMEM_HUGE_ADVISE:
453 * only allocate huge pages if requested with fadvise()/madvise();
454 */
455
456#define SHMEM_HUGE_NEVER 0
457#define SHMEM_HUGE_ALWAYS 1
458#define SHMEM_HUGE_WITHIN_SIZE 2
459#define SHMEM_HUGE_ADVISE 3
460
461/*
462 * Special values.
463 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
464 *
465 * SHMEM_HUGE_DENY:
466 * disables huge on shm_mnt and all mounts, for emergency use;
467 * SHMEM_HUGE_FORCE:
468 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
469 *
470 */
471#define SHMEM_HUGE_DENY (-1)
472#define SHMEM_HUGE_FORCE (-2)
473
474#ifdef CONFIG_TRANSPARENT_HUGEPAGE
475/* ifdef here to avoid bloating shmem.o when not necessary */
476
477static int shmem_huge __read_mostly;
478
479#if defined(CONFIG_SYSFS)
480static int shmem_parse_huge(const char *str)
481{
482 if (!strcmp(str, "never"))
483 return SHMEM_HUGE_NEVER;
484 if (!strcmp(str, "always"))
485 return SHMEM_HUGE_ALWAYS;
486 if (!strcmp(str, "within_size"))
487 return SHMEM_HUGE_WITHIN_SIZE;
488 if (!strcmp(str, "advise"))
489 return SHMEM_HUGE_ADVISE;
490 if (!strcmp(str, "deny"))
491 return SHMEM_HUGE_DENY;
492 if (!strcmp(str, "force"))
493 return SHMEM_HUGE_FORCE;
494 return -EINVAL;
495}
496#endif
497
498#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
499static const char *shmem_format_huge(int huge)
500{
501 switch (huge) {
502 case SHMEM_HUGE_NEVER:
503 return "never";
504 case SHMEM_HUGE_ALWAYS:
505 return "always";
506 case SHMEM_HUGE_WITHIN_SIZE:
507 return "within_size";
508 case SHMEM_HUGE_ADVISE:
509 return "advise";
510 case SHMEM_HUGE_DENY:
511 return "deny";
512 case SHMEM_HUGE_FORCE:
513 return "force";
514 default:
515 VM_BUG_ON(1);
516 return "bad_val";
517 }
518}
519#endif
520
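/*
 * Walk sbinfo->shrinklist and try to split huge pages which extend beyond
 * i_size, so that the tail beyond EOF can be reclaimed.  Inodes whose page
 * could not be locked or split are left on the list for a later attempt.
 * Returns the number of pages split, or SHRINK_STOP if the list is empty.
 */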
521static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
522 struct shrink_control *sc, unsigned long nr_to_split)
523{
524 LIST_HEAD(list), *pos, *next;
525 LIST_HEAD(to_remove);
526 struct inode *inode;
527 struct shmem_inode_info *info;
528 struct page *page;
529 unsigned long batch = sc ? sc->nr_to_scan : 128;
530 int removed = 0, split = 0;
531
532 if (list_empty(&sbinfo->shrinklist))
533 return SHRINK_STOP;
534
535 spin_lock(&sbinfo->shrinklist_lock);
536 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
537 info = list_entry(pos, struct shmem_inode_info, shrinklist);
538
539 /* pin the inode */
540 inode = igrab(&info->vfs_inode);
541
542 /* inode is about to be evicted */
543 if (!inode) {
544 list_del_init(&info->shrinklist);
545 removed++;
546 goto next;
547 }
548
549 /* Check if there's anything to gain */
550 if (round_up(inode->i_size, PAGE_SIZE) ==
551 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
552 list_move(&info->shrinklist, &to_remove);
553 removed++;
554 goto next;
555 }
556
557 list_move(&info->shrinklist, &list);
558next:
559 if (!--batch)
560 break;
561 }
562 spin_unlock(&sbinfo->shrinklist_lock);
563
564 list_for_each_safe(pos, next, &to_remove) {
565 info = list_entry(pos, struct shmem_inode_info, shrinklist);
566 inode = &info->vfs_inode;
567 list_del_init(&info->shrinklist);
568 iput(inode);
569 }
570
571 list_for_each_safe(pos, next, &list) {
572 int ret;
573
574 info = list_entry(pos, struct shmem_inode_info, shrinklist);
575 inode = &info->vfs_inode;
576
577 if (nr_to_split && split >= nr_to_split)
578 goto leave;
579
580 page = find_get_page(inode->i_mapping,
581 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
582 if (!page)
583 goto drop;
584
585 /* No huge page at the end of the file: nothing to split */
586 if (!PageTransHuge(page)) {
587 put_page(page);
588 goto drop;
589 }
590
591 /*
592 * Leave the inode on the list if we failed to lock
593 * the page at this time.
594 *
595 * Waiting for the lock may lead to deadlock in the
596 * reclaim path.
597 */
598 if (!trylock_page(page)) {
599 put_page(page);
600 goto leave;
601 }
602
603 ret = split_huge_page(page);
604 unlock_page(page);
605 put_page(page);
606
607 /* If split failed leave the inode on the list */
608 if (ret)
609 goto leave;
610
611 split++;
612drop:
613 list_del_init(&info->shrinklist);
614 removed++;
615leave:
616 iput(inode);
617 }
618
619 spin_lock(&sbinfo->shrinklist_lock);
620 list_splice_tail(&list, &sbinfo->shrinklist);
621 sbinfo->shrinklist_len -= removed;
622 spin_unlock(&sbinfo->shrinklist_lock);
623
624 return split;
625}
626
627static long shmem_unused_huge_scan(struct super_block *sb,
628 struct shrink_control *sc)
629{
630 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
631
632 if (!READ_ONCE(sbinfo->shrinklist_len))
633 return SHRINK_STOP;
634
635 return shmem_unused_huge_shrink(sbinfo, sc, 0);
636}
637
638static long shmem_unused_huge_count(struct super_block *sb,
639 struct shrink_control *sc)
640{
641 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
642 return READ_ONCE(sbinfo->shrinklist_len);
643}
644#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
645
646#define shmem_huge SHMEM_HUGE_DENY
647
648static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
649 struct shrink_control *sc, unsigned long nr_to_split)
650{
651 return 0;
652}
653#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
654
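/*
 * Huge pages are enabled for this mount if THP is built in, the mount (or
 * the global "force" setting) asks for them, and the global "deny"
 * override is not in effect.
 */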
655static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
656{
657 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
658 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
659 shmem_huge != SHMEM_HUGE_DENY)
660 return true;
661 return false;
662}
663
664/*
665 * Like add_to_page_cache_locked, but error if expected item has gone.
666 */
667static int shmem_add_to_page_cache(struct page *page,
668 struct address_space *mapping,
669 pgoff_t index, void *expected, gfp_t gfp,
670 struct mm_struct *charge_mm)
671{
672 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
673 unsigned long i = 0;
674 unsigned long nr = compound_nr(page);
675 int error;
676
677 VM_BUG_ON_PAGE(PageTail(page), page);
678 VM_BUG_ON_PAGE(index != round_down(index, nr), page);
679 VM_BUG_ON_PAGE(!PageLocked(page), page);
680 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
681 VM_BUG_ON(expected && PageTransHuge(page));
682
683 page_ref_add(page, nr);
684 page->mapping = mapping;
685 page->index = index;
686
687 if (!PageSwapCache(page)) {
688 error = mem_cgroup_charge(page, charge_mm, gfp);
689 if (error) {
690 if (PageTransHuge(page)) {
691 count_vm_event(THP_FILE_FALLBACK);
692 count_vm_event(THP_FILE_FALLBACK_CHARGE);
693 }
694 goto error;
695 }
696 }
697 cgroup_throttle_swaprate(page, gfp);
698
699 do {
700 void *entry;
701 xas_lock_irq(&xas);
702 entry = xas_find_conflict(&xas);
703 if (entry != expected)
704 xas_set_err(&xas, -EEXIST);
705 xas_create_range(&xas);
706 if (xas_error(&xas))
707 goto unlock;
708next:
709 xas_store(&xas, page);
710 if (++i < nr) {
711 xas_next(&xas);
712 goto next;
713 }
714 if (PageTransHuge(page)) {
715 count_vm_event(THP_FILE_ALLOC);
716 __mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
717 }
718 mapping->nrpages += nr;
719 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
720 __mod_lruvec_page_state(page, NR_SHMEM, nr);
721unlock:
722 xas_unlock_irq(&xas);
723 } while (xas_nomem(&xas, gfp));
724
725 if (xas_error(&xas)) {
726 error = xas_error(&xas);
727 goto error;
728 }
729
730 return 0;
731error:
732 page->mapping = NULL;
733 page_ref_sub(page, nr);
734 return error;
735}
736
737/*
738 * Like delete_from_page_cache, but substitutes swap for page.
739 */
740static void shmem_delete_from_page_cache(struct page *page, void *radswap)
741{
742 struct address_space *mapping = page->mapping;
743 int error;
744
745 VM_BUG_ON_PAGE(PageCompound(page), page);
746
747 xa_lock_irq(&mapping->i_pages);
748 error = shmem_replace_entry(mapping, page->index, page, radswap);
749 page->mapping = NULL;
750 mapping->nrpages--;
751 __dec_lruvec_page_state(page, NR_FILE_PAGES);
752 __dec_lruvec_page_state(page, NR_SHMEM);
753 xa_unlock_irq(&mapping->i_pages);
754 put_page(page);
755 BUG_ON(error);
756}
757
758/*
759 * Remove swap entry from page cache, free the swap and its page cache.
760 */
761static int shmem_free_swap(struct address_space *mapping,
762 pgoff_t index, void *radswap)
763{
764 void *old;
765
766 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
767 if (old != radswap)
768 return -ENOENT;
769 free_swap_and_cache(radix_to_swp_entry(radswap));
770 return 0;
771}
772
773/*
774 * Determine (in bytes) how many of the shmem object's pages mapped by the
775 * given offsets are swapped out.
776 *
777 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
778 * as long as the inode doesn't go away and racy results are not a problem.
779 */
780unsigned long shmem_partial_swap_usage(struct address_space *mapping,
781 pgoff_t start, pgoff_t end)
782{
783 XA_STATE(xas, &mapping->i_pages, start);
784 struct page *page;
785 unsigned long swapped = 0;
786
787 rcu_read_lock();
788 xas_for_each(&xas, page, end - 1) {
789 if (xas_retry(&xas, page))
790 continue;
791 if (xa_is_value(page))
792 swapped++;
793
794 if (need_resched()) {
795 xas_pause(&xas);
796 cond_resched_rcu();
797 }
798 }
799
800 rcu_read_unlock();
801
802 return swapped << PAGE_SHIFT;
803}
804
805/*
806 * Determine (in bytes) how many of the shmem object's pages mapped by the
807 * given vma are swapped out.
808 *
809 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
810 * as long as the inode doesn't go away and racy results are not a problem.
811 */
812unsigned long shmem_swap_usage(struct vm_area_struct *vma)
813{
814 struct inode *inode = file_inode(vma->vm_file);
815 struct shmem_inode_info *info = SHMEM_I(inode);
816 struct address_space *mapping = inode->i_mapping;
817 unsigned long swapped;
818
819 /* Be careful as we don't hold info->lock */
820 swapped = READ_ONCE(info->swapped);
821
822 /*
823 * The easier cases are when the shmem object has nothing in swap, or
824 * the vma maps it whole. Then we can simply use the stats that we
825 * already track.
826 */
827 if (!swapped)
828 return 0;
829
830 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
831 return swapped << PAGE_SHIFT;
832
833 /* Here comes the more involved part */
834 return shmem_partial_swap_usage(mapping,
835 linear_page_index(vma, vma->vm_start),
836 linear_page_index(vma, vma->vm_end));
837}
838
839/*
840 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
841 */
842void shmem_unlock_mapping(struct address_space *mapping)
843{
844 struct pagevec pvec;
845 pgoff_t index = 0;
846
847 pagevec_init(&pvec);
848 /*
849 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
850 */
851 while (!mapping_unevictable(mapping)) {
852 if (!pagevec_lookup(&pvec, mapping, &index))
853 break;
854 check_move_unevictable_pages(&pvec);
855 pagevec_release(&pvec);
856 cond_resched();
857 }
858}
859
860/*
861 * Check whether a hole-punch or truncation needs to split a huge page,
862 * returning true if no split was required, or the split has been successful.
863 *
864 * Eviction (or truncation to 0 size) should never need to split a huge page;
865 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
866 * head, and then succeeded to trylock on tail.
867 *
868 * A split can only succeed when there are no additional references on the
869 * huge page: so the split below relies upon find_get_entries() having stopped
870 * when it found a subpage of the huge page, without getting further references.
871 */
872static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
873{
874 if (!PageTransCompound(page))
875 return true;
876
877 /* Just proceed to delete a huge page wholly within the range punched */
878 if (PageHead(page) &&
879 page->index >= start && page->index + HPAGE_PMD_NR <= end)
880 return true;
881
882 /* Try to split huge page, so we can truly punch the hole or truncate */
883 return split_huge_page(page) >= 0;
884}
885
886/*
887 * Remove range of pages and swap entries from page cache, and free them.
888 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
889 */
890static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
891 bool unfalloc)
892{
893 struct address_space *mapping = inode->i_mapping;
894 struct shmem_inode_info *info = SHMEM_I(inode);
895 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
896 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
897 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
898 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
899 struct pagevec pvec;
900 pgoff_t indices[PAGEVEC_SIZE];
901 long nr_swaps_freed = 0;
902 pgoff_t index;
903 int i;
904
905 if (lend == -1)
906 end = -1; /* unsigned, so actually very big */
907
908 pagevec_init(&pvec);
909 index = start;
910 while (index < end && find_lock_entries(mapping, index, end - 1,
911 &pvec, indices)) {
912 for (i = 0; i < pagevec_count(&pvec); i++) {
913 struct page *page = pvec.pages[i];
914
915 index = indices[i];
916
917 if (xa_is_value(page)) {
918 if (unfalloc)
919 continue;
920 nr_swaps_freed += !shmem_free_swap(mapping,
921 index, page);
922 continue;
923 }
924 index += thp_nr_pages(page) - 1;
925
926 if (!unfalloc || !PageUptodate(page))
927 truncate_inode_page(mapping, page);
928 unlock_page(page);
929 }
930 pagevec_remove_exceptionals(&pvec);
931 pagevec_release(&pvec);
932 cond_resched();
933 index++;
934 }
935
936 if (partial_start) {
937 struct page *page = NULL;
938 shmem_getpage(inode, start - 1, &page, SGP_READ);
939 if (page) {
940 unsigned int top = PAGE_SIZE;
941 if (start > end) {
942 top = partial_end;
943 partial_end = 0;
944 }
945 zero_user_segment(page, partial_start, top);
946 set_page_dirty(page);
947 unlock_page(page);
948 put_page(page);
949 }
950 }
951 if (partial_end) {
952 struct page *page = NULL;
953 shmem_getpage(inode, end, &page, SGP_READ);
954 if (page) {
955 zero_user_segment(page, 0, partial_end);
956 set_page_dirty(page);
957 unlock_page(page);
958 put_page(page);
959 }
960 }
961 if (start >= end)
962 return;
963
964 index = start;
965 while (index < end) {
966 cond_resched();
967
968 if (!find_get_entries(mapping, index, end - 1, &pvec,
969 indices)) {
970 /* If all gone or hole-punch or unfalloc, we're done */
971 if (index == start || end != -1)
972 break;
973 /* But if truncating, restart to make sure all gone */
974 index = start;
975 continue;
976 }
977 for (i = 0; i < pagevec_count(&pvec); i++) {
978 struct page *page = pvec.pages[i];
979
980 index = indices[i];
981 if (xa_is_value(page)) {
982 if (unfalloc)
983 continue;
984 if (shmem_free_swap(mapping, index, page)) {
985 /* Swap was replaced by page: retry */
986 index--;
987 break;
988 }
989 nr_swaps_freed++;
990 continue;
991 }
992
993 lock_page(page);
994
995 if (!unfalloc || !PageUptodate(page)) {
996 if (page_mapping(page) != mapping) {
997 /* Page was replaced by swap: retry */
998 unlock_page(page);
999 index--;
1000 break;
1001 }
1002 VM_BUG_ON_PAGE(PageWriteback(page), page);
1003 if (shmem_punch_compound(page, start, end))
1004 truncate_inode_page(mapping, page);
1005 else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1006 /* Wipe the page and don't get stuck */
1007 clear_highpage(page);
1008 flush_dcache_page(page);
1009 set_page_dirty(page);
1010 if (index <
1011 round_up(start, HPAGE_PMD_NR))
1012 start = index + 1;
1013 }
1014 }
1015 unlock_page(page);
1016 }
1017 pagevec_remove_exceptionals(&pvec);
1018 pagevec_release(&pvec);
1019 index++;
1020 }
1021
1022 spin_lock_irq(&info->lock);
1023 info->swapped -= nr_swaps_freed;
1024 shmem_recalc_inode(inode);
1025 spin_unlock_irq(&info->lock);
1026}
1027
1028void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1029{
1030 shmem_undo_range(inode, lstart, lend, false);
1031 inode->i_ctime = inode->i_mtime = current_time(inode);
1032}
1033EXPORT_SYMBOL_GPL(shmem_truncate_range);
1034
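/*
 * getattr: bring the block counters up to date first (the mm may have
 * dropped clean pages behind our back), then fill in the kstat; report
 * the huge page size as blksize when huge pages are enabled.
 */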
1035static int shmem_getattr(struct user_namespace *mnt_userns,
1036 const struct path *path, struct kstat *stat,
1037 u32 request_mask, unsigned int query_flags)
1038{
1039 struct inode *inode = path->dentry->d_inode;
1040 struct shmem_inode_info *info = SHMEM_I(inode);
1041 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1042
1043 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1044 spin_lock_irq(&info->lock);
1045 shmem_recalc_inode(inode);
1046 spin_unlock_irq(&info->lock);
1047 }
1048 generic_fillattr(&init_user_ns, inode, stat);
1049
1050 if (is_huge_enabled(sb_info))
1051 stat->blksize = HPAGE_PMD_SIZE;
1052
1053 return 0;
1054}
1055
1056static int shmem_setattr(struct user_namespace *mnt_userns,
1057 struct dentry *dentry, struct iattr *attr)
1058{
1059 struct inode *inode = d_inode(dentry);
1060 struct shmem_inode_info *info = SHMEM_I(inode);
1061 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1062 int error;
1063
1064 error = setattr_prepare(&init_user_ns, dentry, attr);
1065 if (error)
1066 return error;
1067
1068 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1069 loff_t oldsize = inode->i_size;
1070 loff_t newsize = attr->ia_size;
1071
1072 /* protected by i_mutex */
1073 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1074 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1075 return -EPERM;
1076
1077 if (newsize != oldsize) {
1078 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1079 oldsize, newsize);
1080 if (error)
1081 return error;
1082 i_size_write(inode, newsize);
1083 inode->i_ctime = inode->i_mtime = current_time(inode);
1084 }
1085 if (newsize <= oldsize) {
1086 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1087 if (oldsize > holebegin)
1088 unmap_mapping_range(inode->i_mapping,
1089 holebegin, 0, 1);
1090 if (info->alloced)
1091 shmem_truncate_range(inode,
1092 newsize, (loff_t)-1);
1093 /* unmap again to remove racily COWed private pages */
1094 if (oldsize > holebegin)
1095 unmap_mapping_range(inode->i_mapping,
1096 holebegin, 0, 1);
1097
1098 /*
1099 * Part of the huge page can be beyond i_size: subject
1100 * to shrink under memory pressure.
1101 */
1102 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1103 spin_lock(&sbinfo->shrinklist_lock);
1104 /*
1105 * _careful to defend against unlocked access to
1106				 * ->shrinklist in shmem_unused_huge_shrink()
1107 */
1108 if (list_empty_careful(&info->shrinklist)) {
1109 list_add_tail(&info->shrinklist,
1110 &sbinfo->shrinklist);
1111 sbinfo->shrinklist_len++;
1112 }
1113 spin_unlock(&sbinfo->shrinklist_lock);
1114 }
1115 }
1116 }
1117
1118 setattr_copy(&init_user_ns, inode, attr);
1119 if (attr->ia_valid & ATTR_MODE)
1120 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
1121 return error;
1122}
1123
1124static void shmem_evict_inode(struct inode *inode)
1125{
1126 struct shmem_inode_info *info = SHMEM_I(inode);
1127 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1128
1129 if (shmem_mapping(inode->i_mapping)) {
1130 shmem_unacct_size(info->flags, inode->i_size);
1131 inode->i_size = 0;
1132 shmem_truncate_range(inode, 0, (loff_t)-1);
1133 if (!list_empty(&info->shrinklist)) {
1134 spin_lock(&sbinfo->shrinklist_lock);
1135 if (!list_empty(&info->shrinklist)) {
1136 list_del_init(&info->shrinklist);
1137 sbinfo->shrinklist_len--;
1138 }
1139 spin_unlock(&sbinfo->shrinklist_lock);
1140 }
1141 while (!list_empty(&info->swaplist)) {
1142 /* Wait while shmem_unuse() is scanning this inode... */
1143 wait_var_event(&info->stop_eviction,
1144 !atomic_read(&info->stop_eviction));
1145 mutex_lock(&shmem_swaplist_mutex);
1146 /* ...but beware of the race if we peeked too early */
1147 if (!atomic_read(&info->stop_eviction))
1148 list_del_init(&info->swaplist);
1149 mutex_unlock(&shmem_swaplist_mutex);
1150 }
1151 }
1152
1153 simple_xattrs_free(&info->xattrs);
1154 WARN_ON(inode->i_blocks);
1155 shmem_free_inode(inode->i_sb);
1156 clear_inode(inode);
1157}
1158
1159extern struct swap_info_struct *swap_info[];
1160
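/*
 * Collect up to @nr_entries swap entries from the mapping which belong to
 * swap device @type (and, when @frontswap, are present in frontswap),
 * filling @entries and @indices.  Returns the number of entries found.
 */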
1161static int shmem_find_swap_entries(struct address_space *mapping,
1162 pgoff_t start, unsigned int nr_entries,
1163 struct page **entries, pgoff_t *indices,
1164 unsigned int type, bool frontswap)
1165{
1166 XA_STATE(xas, &mapping->i_pages, start);
1167 struct page *page;
1168 swp_entry_t entry;
1169 unsigned int ret = 0;
1170
1171 if (!nr_entries)
1172 return 0;
1173
1174 rcu_read_lock();
1175 xas_for_each(&xas, page, ULONG_MAX) {
1176 if (xas_retry(&xas, page))
1177 continue;
1178
1179 if (!xa_is_value(page))
1180 continue;
1181
1182 entry = radix_to_swp_entry(page);
1183 if (swp_type(entry) != type)
1184 continue;
1185 if (frontswap &&
1186 !frontswap_test(swap_info[type], swp_offset(entry)))
1187 continue;
1188
1189 indices[ret] = xas.xa_index;
1190 entries[ret] = page;
1191
1192 if (need_resched()) {
1193 xas_pause(&xas);
1194 cond_resched_rcu();
1195 }
1196 if (++ret == nr_entries)
1197 break;
1198 }
1199 rcu_read_unlock();
1200
1201 return ret;
1202}
1203
1204/*
1205 * Move the swapped pages for an inode to page cache. Returns the count
1206 * of pages swapped in, or the error in case of failure.
1207 */
1208static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1209 pgoff_t *indices)
1210{
1211 int i = 0;
1212 int ret = 0;
1213 int error = 0;
1214 struct address_space *mapping = inode->i_mapping;
1215
1216 for (i = 0; i < pvec.nr; i++) {
1217 struct page *page = pvec.pages[i];
1218
1219 if (!xa_is_value(page))
1220 continue;
1221 error = shmem_swapin_page(inode, indices[i],
1222 &page, SGP_CACHE,
1223 mapping_gfp_mask(mapping),
1224 NULL, NULL);
1225 if (error == 0) {
1226 unlock_page(page);
1227 put_page(page);
1228 ret++;
1229 }
1230 if (error == -ENOMEM)
1231 break;
1232 error = 0;
1233 }
1234 return error ? error : ret;
1235}
1236
1237/*
1238 * If swap found in inode, free it and move page from swapcache to filecache.
1239 */
1240static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1241 bool frontswap, unsigned long *fs_pages_to_unuse)
1242{
1243 struct address_space *mapping = inode->i_mapping;
1244 pgoff_t start = 0;
1245 struct pagevec pvec;
1246 pgoff_t indices[PAGEVEC_SIZE];
1247 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1248 int ret = 0;
1249
1250 pagevec_init(&pvec);
1251 do {
1252 unsigned int nr_entries = PAGEVEC_SIZE;
1253
1254 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1255 nr_entries = *fs_pages_to_unuse;
1256
1257 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1258 pvec.pages, indices,
1259 type, frontswap);
1260 if (pvec.nr == 0) {
1261 ret = 0;
1262 break;
1263 }
1264
1265 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1266 if (ret < 0)
1267 break;
1268
1269 if (frontswap_partial) {
1270 *fs_pages_to_unuse -= ret;
1271 if (*fs_pages_to_unuse == 0) {
1272 ret = FRONTSWAP_PAGES_UNUSED;
1273 break;
1274 }
1275 }
1276
1277 start = indices[pvec.nr - 1];
1278 } while (true);
1279
1280 return ret;
1281}
1282
1283/*
1284 * Read all the shared memory data that resides in the swap
1285 * device 'type' back into memory, so the swap device can be
1286 * unused.
1287 */
1288int shmem_unuse(unsigned int type, bool frontswap,
1289 unsigned long *fs_pages_to_unuse)
1290{
1291 struct shmem_inode_info *info, *next;
1292 int error = 0;
1293
1294 if (list_empty(&shmem_swaplist))
1295 return 0;
1296
1297 mutex_lock(&shmem_swaplist_mutex);
1298 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1299 if (!info->swapped) {
1300 list_del_init(&info->swaplist);
1301 continue;
1302 }
1303 /*
1304 * Drop the swaplist mutex while searching the inode for swap;
1305 * but before doing so, make sure shmem_evict_inode() will not
1306 * remove placeholder inode from swaplist, nor let it be freed
1307 * (igrab() would protect from unlink, but not from unmount).
1308 */
1309 atomic_inc(&info->stop_eviction);
1310 mutex_unlock(&shmem_swaplist_mutex);
1311
1312 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1313 fs_pages_to_unuse);
1314 cond_resched();
1315
1316 mutex_lock(&shmem_swaplist_mutex);
1317 next = list_next_entry(info, swaplist);
1318 if (!info->swapped)
1319 list_del_init(&info->swaplist);
1320 if (atomic_dec_and_test(&info->stop_eviction))
1321 wake_up_var(&info->stop_eviction);
1322 if (error)
1323 break;
1324 }
1325 mutex_unlock(&shmem_swaplist_mutex);
1326
1327 return error;
1328}
1329
1330/*
1331 * Move the page from the page cache to the swap cache.
1332 */
1333static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1334{
1335 struct shmem_inode_info *info;
1336 struct address_space *mapping;
1337 struct inode *inode;
1338 swp_entry_t swap;
1339 pgoff_t index;
1340
1341 VM_BUG_ON_PAGE(PageCompound(page), page);
1342 BUG_ON(!PageLocked(page));
1343 mapping = page->mapping;
1344 index = page->index;
1345 inode = mapping->host;
1346 info = SHMEM_I(inode);
1347 if (info->flags & VM_LOCKED)
1348 goto redirty;
1349 if (!total_swap_pages)
1350 goto redirty;
1351
1352 /*
1353 * Our capabilities prevent regular writeback or sync from ever calling
1354 * shmem_writepage; but a stacking filesystem might use ->writepage of
1355 * its underlying filesystem, in which case tmpfs should write out to
1356 * swap only in response to memory pressure, and not for the writeback
1357 * threads or sync.
1358 */
1359 if (!wbc->for_reclaim) {
1360 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
1361 goto redirty;
1362 }
1363
1364 /*
1365 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1366 * value into swapfile.c, the only way we can correctly account for a
1367 * fallocated page arriving here is now to initialize it and write it.
1368 *
1369 * That's okay for a page already fallocated earlier, but if we have
1370 * not yet completed the fallocation, then (a) we want to keep track
1371 * of this page in case we have to undo it, and (b) it may not be a
1372 * good idea to continue anyway, once we're pushing into swap. So
1373 * reactivate the page, and let shmem_fallocate() quit when too many.
1374 */
1375 if (!PageUptodate(page)) {
1376 if (inode->i_private) {
1377 struct shmem_falloc *shmem_falloc;
1378 spin_lock(&inode->i_lock);
1379 shmem_falloc = inode->i_private;
1380 if (shmem_falloc &&
1381 !shmem_falloc->waitq &&
1382 index >= shmem_falloc->start &&
1383 index < shmem_falloc->next)
1384 shmem_falloc->nr_unswapped++;
1385 else
1386 shmem_falloc = NULL;
1387 spin_unlock(&inode->i_lock);
1388 if (shmem_falloc)
1389 goto redirty;
1390 }
1391 clear_highpage(page);
1392 flush_dcache_page(page);
1393 SetPageUptodate(page);
1394 }
1395
1396 swap = get_swap_page(page);
1397 if (!swap.val)
1398 goto redirty;
1399
1400 /*
1401 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1402 * if it's not already there. Do it now before the page is
1403 * moved to swap cache, when its pagelock no longer protects
1404 * the inode from eviction. But don't unlock the mutex until
1405 * we've incremented swapped, because shmem_unuse_inode() will
1406 * prune a !swapped inode from the swaplist under this mutex.
1407 */
1408 mutex_lock(&shmem_swaplist_mutex);
1409 if (list_empty(&info->swaplist))
1410 list_add(&info->swaplist, &shmem_swaplist);
1411
1412 if (add_to_swap_cache(page, swap,
1413 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1414 NULL) == 0) {
1415 spin_lock_irq(&info->lock);
1416 shmem_recalc_inode(inode);
1417 info->swapped++;
1418 spin_unlock_irq(&info->lock);
1419
1420 swap_shmem_alloc(swap);
1421 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1422
1423 mutex_unlock(&shmem_swaplist_mutex);
1424 BUG_ON(page_mapped(page));
1425 swap_writepage(page, wbc);
1426 return 0;
1427 }
1428
1429 mutex_unlock(&shmem_swaplist_mutex);
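	/* add_to_swap_cache() failed: give the swap slot back and redirty */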
1430 put_swap_page(page, swap);
1431redirty:
1432 set_page_dirty(page);
1433 if (wbc->for_reclaim)
1434 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
1435 unlock_page(page);
1436 return 0;
1437}
1438
1439#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1440static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1441{
1442 char buffer[64];
1443
1444 if (!mpol || mpol->mode == MPOL_DEFAULT)
1445 return; /* show nothing */
1446
1447 mpol_to_str(buffer, sizeof(buffer), mpol);
1448
1449 seq_printf(seq, ",mpol=%s", buffer);
1450}
1451
1452static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1453{
1454 struct mempolicy *mpol = NULL;
1455 if (sbinfo->mpol) {
1456 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1457 mpol = sbinfo->mpol;
1458 mpol_get(mpol);
1459 spin_unlock(&sbinfo->stat_lock);
1460 }
1461 return mpol;
1462}
1463#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1464static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1465{
1466}
1467static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1468{
1469 return NULL;
1470}
1471#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1472#ifndef CONFIG_NUMA
1473#define vm_policy vm_private_data
1474#endif
1475
1476static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1477 struct shmem_inode_info *info, pgoff_t index)
1478{
1479 /* Create a pseudo vma that just contains the policy */
1480 vma_init(vma, NULL);
1481 /* Bias interleave by inode number to distribute better across nodes */
1482 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1483 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1484}
1485
1486static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1487{
1488 /* Drop reference taken by mpol_shared_policy_lookup() */
1489 mpol_cond_put(vma->vm_policy);
1490}
1491
1492static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1493 struct shmem_inode_info *info, pgoff_t index)
1494{
1495 struct vm_area_struct pvma;
1496 struct page *page;
1497 struct vm_fault vmf = {
1498 .vma = &pvma,
1499 };
1500
1501 shmem_pseudo_vma_init(&pvma, info, index);
1502 page = swap_cluster_readahead(swap, gfp, &vmf);
1503 shmem_pseudo_vma_destroy(&pvma);
1504
1505 return page;
1506}
1507
1508/*
1509 * Make sure huge_gfp is always more limited than limit_gfp.
1510 * Some of the flags set permissions, while others set limitations.
1511 */
1512static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1513{
1514 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1515 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1516 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1517 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1518
1519 /* Allow allocations only from the originally specified zones. */
1520 result |= zoneflags;
1521
1522 /*
1523 * Minimize the result gfp by taking the union with the deny flags,
1524 * and the intersection of the allow flags.
1525 */
1526 result |= (limit_gfp & denyflags);
1527 result |= (huge_gfp & limit_gfp) & allowflags;
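	/*
	 * For example: __GFP_FS ends up set only if both huge_gfp and
	 * limit_gfp allow it, while __GFP_NORETRY from limit_gfp is
	 * always carried over into the result.
	 */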
1528
1529 return result;
1530}
1531
1532static struct page *shmem_alloc_hugepage(gfp_t gfp,
1533 struct shmem_inode_info *info, pgoff_t index)
1534{
1535 struct vm_area_struct pvma;
1536 struct address_space *mapping = info->vfs_inode.i_mapping;
1537 pgoff_t hindex;
1538 struct page *page;
1539
1540 hindex = round_down(index, HPAGE_PMD_NR);
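	/*
	 * A huge page needs all HPAGE_PMD_NR slots of its PMD-aligned
	 * range to be free in the page cache; bail out if anything is
	 * already present there.
	 */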
1541 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1542 XA_PRESENT))
1543 return NULL;
1544
1545 shmem_pseudo_vma_init(&pvma, info, hindex);
1546 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(),
1547 true);
1548 shmem_pseudo_vma_destroy(&pvma);
1549 if (page)
1550 prep_transhuge_page(page);
1551 else
1552 count_vm_event(THP_FILE_FALLBACK);
1553 return page;
1554}
1555
1556static struct page *shmem_alloc_page(gfp_t gfp,
1557 struct shmem_inode_info *info, pgoff_t index)
1558{
1559 struct vm_area_struct pvma;
1560 struct page *page;
1561
1562 shmem_pseudo_vma_init(&pvma, info, index);
1563 page = alloc_page_vma(gfp, &pvma, 0);
1564 shmem_pseudo_vma_destroy(&pvma);
1565
1566 return page;
1567}
1568
1569static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1570 struct inode *inode,
1571 pgoff_t index, bool huge)
1572{
1573 struct shmem_inode_info *info = SHMEM_I(inode);
1574 struct page *page;
1575 int nr;
1576 int err = -ENOSPC;
1577
1578 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1579 huge = false;
1580 nr = huge ? HPAGE_PMD_NR : 1;
1581
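	/*
	 * shmem_inode_acct_block() enforces the size/block limit (and
	 * overcommit rules): failing it means -ENOSPC, whereas a failed
	 * page allocation below means -ENOMEM.
	 */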
1582 if (!shmem_inode_acct_block(inode, nr))
1583 goto failed;
1584
1585 if (huge)
1586 page = shmem_alloc_hugepage(gfp, info, index);
1587 else
1588 page = shmem_alloc_page(gfp, info, index);
1589 if (page) {
1590 __SetPageLocked(page);
1591 __SetPageSwapBacked(page);
1592 return page;
1593 }
1594
1595 err = -ENOMEM;
1596 shmem_inode_unacct_blocks(inode, nr);
1597failed:
1598 return ERR_PTR(err);
1599}
1600
1601/*
1602 * When a page is moved from swapcache to shmem filecache (either by the
1603 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1604 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1605 * ignorance of the mapping it belongs to. If that mapping has special
1606 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1607 * we may need to copy to a suitable page before moving to filecache.
1608 *
1609 * In a future release, this may well be extended to respect cpuset and
1610 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1611 * but for now it is a simple matter of zone.
1612 */
1613static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1614{
1615 return page_zonenum(page) > gfp_zone(gfp);
1616}
1617
1618static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1619 struct shmem_inode_info *info, pgoff_t index)
1620{
1621 struct page *oldpage, *newpage;
1622 struct address_space *swap_mapping;
1623 swp_entry_t entry;
1624 pgoff_t swap_index;
1625 int error;
1626
1627 oldpage = *pagep;
1628 entry.val = page_private(oldpage);
1629 swap_index = swp_offset(entry);
1630 swap_mapping = page_mapping(oldpage);
1631
1632 /*
1633 * We have arrived here because our zones are constrained, so don't
1634 * limit chance of success by further cpuset and node constraints.
1635 */
1636 gfp &= ~GFP_CONSTRAINT_MASK;
1637 newpage = shmem_alloc_page(gfp, info, index);
1638 if (!newpage)
1639 return -ENOMEM;
1640
1641 get_page(newpage);
1642 copy_highpage(newpage, oldpage);
1643 flush_dcache_page(newpage);
1644
1645 __SetPageLocked(newpage);
1646 __SetPageSwapBacked(newpage);
1647 SetPageUptodate(newpage);
1648 set_page_private(newpage, entry.val);
1649 SetPageSwapCache(newpage);
1650
1651 /*
1652 * Our caller will very soon move newpage out of swapcache, but it's
1653 * a nice clean interface for us to replace oldpage by newpage there.
1654 */
1655 xa_lock_irq(&swap_mapping->i_pages);
1656 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1657 if (!error) {
1658 mem_cgroup_migrate(oldpage, newpage);
1659 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1660 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1661 }
1662 xa_unlock_irq(&swap_mapping->i_pages);
1663
1664 if (unlikely(error)) {
1665 /*
1666 * Is this possible? I think not, now that our callers check
1667 * both PageSwapCache and page_private after getting page lock;
1668 * but be defensive: make newpage the one that gets cleared and freed below.
1669 */
1670 oldpage = newpage;
1671 } else {
1672 lru_cache_add(newpage);
1673 *pagep = newpage;
1674 }
1675
1676 ClearPageSwapCache(oldpage);
1677 set_page_private(oldpage, 0);
1678
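	/*
	 * Drop the two remaining references on the page being discarded
	 * (on success that is oldpage, on failure newpage).
	 */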
1679 unlock_page(oldpage);
1680 put_page(oldpage);
1681 put_page(oldpage);
1682 return error;
1683}
1684
1685/*
1686 * Swap in the page pointed to by *pagep.
1687 * Caller has to make sure that *pagep contains a valid swapped page.
1688 * Returns 0 and the page in pagep if success. On failure, returns the
1689 * error code and NULL in *pagep.
1690 */
1691static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1692 struct page **pagep, enum sgp_type sgp,
1693 gfp_t gfp, struct vm_area_struct *vma,
1694 vm_fault_t *fault_type)
1695{
1696 struct address_space *mapping = inode->i_mapping;
1697 struct shmem_inode_info *info = SHMEM_I(inode);
1698 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1699 struct page *page;
1700 swp_entry_t swap;
1701 int error;
1702
1703 VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1704 swap = radix_to_swp_entry(*pagep);
1705 *pagep = NULL;
1706
1707 /* Look it up and read it in.. */
1708 page = lookup_swap_cache(swap, NULL, 0);
1709 if (!page) {
1710 /* Or update major stats only when swapin succeeds?? */
1711 if (fault_type) {
1712 *fault_type |= VM_FAULT_MAJOR;
1713 count_vm_event(PGMAJFAULT);
1714 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1715 }
1716 /* Here we actually start the io */
1717 page = shmem_swapin(swap, gfp, info, index);
1718 if (!page) {
1719 error = -ENOMEM;
1720 goto failed;
1721 }
1722 }
1723
1724 /* We have to do this with page locked to prevent races */
1725 lock_page(page);
1726 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1727 !shmem_confirm_swap(mapping, index, swap)) {
1728 error = -EEXIST;
1729 goto unlock;
1730 }
1731 if (!PageUptodate(page)) {
1732 error = -EIO;
1733 goto failed;
1734 }
1735 wait_on_page_writeback(page);
1736
1737 /*
1738 * Some architectures may have to restore extra metadata to the
1739 * physical page after reading from swap.
1740 */
1741 arch_swap_restore(swap, page);
1742
1743 if (shmem_should_replace_page(page, gfp)) {
1744 error = shmem_replace_page(&page, gfp, info, index);
1745 if (error)
1746 goto failed;
1747 }
1748
1749 error = shmem_add_to_page_cache(page, mapping, index,
1750 swp_to_radix_entry(swap), gfp,
1751 charge_mm);
1752 if (error)
1753 goto failed;
1754
1755 spin_lock_irq(&info->lock);
1756 info->swapped--;
1757 shmem_recalc_inode(inode);
1758 spin_unlock_irq(&info->lock);
1759
1760 if (sgp == SGP_WRITE)
1761 mark_page_accessed(page);
1762
1763 delete_from_swap_cache(page);
1764 set_page_dirty(page);
1765 swap_free(swap);
1766
1767 *pagep = page;
1768 return 0;
1769failed:
1770 if (!shmem_confirm_swap(mapping, index, swap))
1771 error = -EEXIST;
1772unlock:
1773 if (page) {
1774 unlock_page(page);
1775 put_page(page);
1776 }
1777
1778 return error;
1779}
1780
1781/*
1782 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1783 *
1784 * If we allocate a new one we do not mark it dirty. That's up to the
1785 * vm. If we swap it in we mark it dirty, since we also free the swap
1786 * entry: a page cannot live in both the swap and page cache.
1787 *
1788 * vma, vmf, and fault_type are only supplied by shmem_fault:
1789 * otherwise they are NULL.
1790 */
1791static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1792 struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1793 struct vm_area_struct *vma, struct vm_fault *vmf,
1794 vm_fault_t *fault_type)
1795{
1796 struct address_space *mapping = inode->i_mapping;
1797 struct shmem_inode_info *info = SHMEM_I(inode);
1798 struct shmem_sb_info *sbinfo;
1799 struct mm_struct *charge_mm;
1800 struct page *page;
1801 enum sgp_type sgp_huge = sgp;
1802 pgoff_t hindex = index;
1803 gfp_t huge_gfp;
1804 int error;
1805 int once = 0;
1806 int alloced = 0;
1807
1808 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1809 return -EFBIG;
1810 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1811 sgp = SGP_CACHE;
1812repeat:
1813 if (sgp <= SGP_CACHE &&
1814 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1815 return -EINVAL;
1816 }
1817
1818 sbinfo = SHMEM_SB(inode->i_sb);
1819 charge_mm = vma ? vma->vm_mm : NULL;
1820
1821 page = pagecache_get_page(mapping, index,
1822 FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
1823
1824 if (page && vma && userfaultfd_minor(vma)) {
1825 if (!xa_is_value(page)) {
1826 unlock_page(page);
1827 put_page(page);
1828 }
1829 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1830 return 0;
1831 }
1832
1833 if (xa_is_value(page)) {
1834 error = shmem_swapin_page(inode, index, &page,
1835 sgp, gfp, vma, fault_type);
1836 if (error == -EEXIST)
1837 goto repeat;
1838
1839 *pagep = page;
1840 return error;
1841 }
1842
1843 if (page)
1844 hindex = page->index;
1845 if (page && sgp == SGP_WRITE)
1846 mark_page_accessed(page);
1847
1848 /* fallocated page? */
1849 if (page && !PageUptodate(page)) {
1850 if (sgp != SGP_READ)
1851 goto clear;
1852 unlock_page(page);
1853 put_page(page);
1854 page = NULL;
1855 hindex = index;
1856 }
1857 if (page || sgp == SGP_READ)
1858 goto out;
1859
1860 /*
1861 * Fast cache lookup did not find it:
1862 * bring it back from swap or allocate.
1863 */
1864
1865 if (vma && userfaultfd_missing(vma)) {
1866 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1867 return 0;
1868 }
1869
1870 /* shmem_symlink() */
1871 if (!shmem_mapping(mapping))
1872 goto alloc_nohuge;
1873 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1874 goto alloc_nohuge;
1875 if (shmem_huge == SHMEM_HUGE_FORCE)
1876 goto alloc_huge;
1877 switch (sbinfo->huge) {
1878 case SHMEM_HUGE_NEVER:
1879 goto alloc_nohuge;
1880 case SHMEM_HUGE_WITHIN_SIZE: {
1881 loff_t i_size;
1882 pgoff_t off;
1883
1884 off = round_up(index, HPAGE_PMD_NR);
1885 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1886 if (i_size >= HPAGE_PMD_SIZE &&
1887 i_size >> PAGE_SHIFT >= off)
1888 goto alloc_huge;
1889
1890 fallthrough;
1891 }
1892 case SHMEM_HUGE_ADVISE:
1893 if (sgp_huge == SGP_HUGE)
1894 goto alloc_huge;
1895 /* TODO: implement fadvise() hints */
1896 goto alloc_nohuge;
1897 }
1898
1899alloc_huge:
1900 huge_gfp = vma_thp_gfp_mask(vma);
1901 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1902 page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
1903 if (IS_ERR(page)) {
1904alloc_nohuge:
1905 page = shmem_alloc_and_acct_page(gfp, inode,
1906 index, false);
1907 }
1908 if (IS_ERR(page)) {
1909 int retry = 5;
1910
1911 error = PTR_ERR(page);
1912 page = NULL;
1913 if (error != -ENOSPC)
1914 goto unlock;
1915 /*
1916 * Try to reclaim some space by splitting a huge page
1917 * beyond i_size on the filesystem.
1918 */
1919 while (retry--) {
1920 int ret;
1921
1922 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1923 if (ret == SHRINK_STOP)
1924 break;
1925 if (ret)
1926 goto alloc_nohuge;
1927 }
1928 goto unlock;
1929 }
1930
1931 if (PageTransHuge(page))
1932 hindex = round_down(index, HPAGE_PMD_NR);
1933 else
1934 hindex = index;
1935
1936 if (sgp == SGP_WRITE)
1937 __SetPageReferenced(page);
1938
1939 error = shmem_add_to_page_cache(page, mapping, hindex,
1940 NULL, gfp & GFP_RECLAIM_MASK,
1941 charge_mm);
1942 if (error)
1943 goto unacct;
1944 lru_cache_add(page);
1945
1946 spin_lock_irq(&info->lock);
1947 info->alloced += compound_nr(page);
1948 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1949 shmem_recalc_inode(inode);
1950 spin_unlock_irq(&info->lock);
1951 alloced = true;
1952
1953 if (PageTransHuge(page) &&
1954 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1955 hindex + HPAGE_PMD_NR - 1) {
1956 /*
1957 * Part of the huge page is beyond i_size: subject
1958 * to shrink under memory pressure.
1959 */
1960 spin_lock(&sbinfo->shrinklist_lock);
1961 /*
1962 * Use list_empty_careful() to defend against unlocked access
1963 * to ->shrinklist in shmem_unused_huge_shrink()
1964 */
1965 if (list_empty_careful(&info->shrinklist)) {
1966 list_add_tail(&info->shrinklist,
1967 &sbinfo->shrinklist);
1968 sbinfo->shrinklist_len++;
1969 }
1970 spin_unlock(&sbinfo->shrinklist_lock);
1971 }
1972
1973 /*
1974 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1975 */
1976 if (sgp == SGP_FALLOC)
1977 sgp = SGP_WRITE;
1978clear:
1979 /*
1980 * Let SGP_WRITE caller clear ends if write does not fill page;
1981 * but SGP_FALLOC on a page fallocated earlier must initialize
1982 * it now, lest undo on failure cancel our earlier guarantee.
1983 */
1984 if (sgp != SGP_WRITE && !PageUptodate(page)) {
1985 int i;
1986
1987 for (i = 0; i < compound_nr(page); i++) {
1988 clear_highpage(page + i);
1989 flush_dcache_page(page + i);
1990 }
1991 SetPageUptodate(page);
1992 }
1993
1994 /* Perhaps the file has been truncated since we checked */
1995 if (sgp <= SGP_CACHE &&
1996 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1997 if (alloced) {
1998 ClearPageDirty(page);
1999 delete_from_page_cache(page);
2000 spin_lock_irq(&info->lock);
2001 shmem_recalc_inode(inode);
2002 spin_unlock_irq(&info->lock);
2003 }
2004 error = -EINVAL;
2005 goto unlock;
2006 }
2007out:
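	/*
	 * page is the head page and hindex its index: hand back the
	 * subpage that corresponds to the index actually requested.
	 */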
2008 *pagep = page + index - hindex;
2009 return 0;
2010
2011 /*
2012 * Error recovery.
2013 */
2014unacct:
2015 shmem_inode_unacct_blocks(inode, compound_nr(page));
2016
2017 if (PageTransHuge(page)) {
2018 unlock_page(page);
2019 put_page(page);
2020 goto alloc_nohuge;
2021 }
2022unlock:
2023 if (page) {
2024 unlock_page(page);
2025 put_page(page);
2026 }
2027 if (error == -ENOSPC && !once++) {
2028 spin_lock_irq(&info->lock);
2029 shmem_recalc_inode(inode);
2030 spin_unlock_irq(&info->lock);
2031 goto repeat;
2032 }
2033 if (error == -EEXIST)
2034 goto repeat;
2035 return error;
2036}
2037
2038/*
2039 * This is like autoremove_wake_function, but it removes the wait queue
2040 * entry unconditionally - even if something else had already woken the
2041 * target.
2042 */
2043static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2044{
2045 int ret = default_wake_function(wait, mode, sync, key);
2046 list_del_init(&wait->entry);
2047 return ret;
2048}
2049
2050static vm_fault_t shmem_fault(struct vm_fault *vmf)
2051{
2052 struct vm_area_struct *vma = vmf->vma;
2053 struct inode *inode = file_inode(vma->vm_file);
2054 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2055 enum sgp_type sgp;
2056 int err;
2057 vm_fault_t ret = VM_FAULT_LOCKED;
2058
2059 /*
2060 * Trinity finds that probing a hole which tmpfs is punching can
2061 * prevent the hole-punch from ever completing: which in turn
2062 * locks writers out with its hold on i_mutex. So refrain from
2063 * faulting pages into the hole while it's being punched. Although
2064 * shmem_undo_range() does remove the additions, it may be unable to
2065 * keep up, as each new page needs its own unmap_mapping_range() call,
2066 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2067 *
2068 * It does not matter if we sometimes reach this check just before the
2069 * hole-punch begins, so that one fault then races with the punch:
2070 * we just need to make racing faults a rare case.
2071 *
2072 * The implementation below would be much simpler if we just used a
2073 * standard mutex or completion: but we cannot take i_mutex in fault,
2074 * and bloating every shmem inode for this unlikely case would be sad.
2075 */
2076 if (unlikely(inode->i_private)) {
2077 struct shmem_falloc *shmem_falloc;
2078
2079 spin_lock(&inode->i_lock);
2080 shmem_falloc = inode->i_private;
2081 if (shmem_falloc &&
2082 shmem_falloc->waitq &&
2083 vmf->pgoff >= shmem_falloc->start &&
2084 vmf->pgoff < shmem_falloc->next) {
2085 struct file *fpin;
2086 wait_queue_head_t *shmem_falloc_waitq;
2087 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2088
2089 ret = VM_FAULT_NOPAGE;
2090 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2091 if (fpin)
2092 ret = VM_FAULT_RETRY;
2093
2094 shmem_falloc_waitq = shmem_falloc->waitq;
2095 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2096 TASK_UNINTERRUPTIBLE);
2097 spin_unlock(&inode->i_lock);
2098 schedule();
2099
2100 /*
2101 * shmem_falloc_waitq points into the shmem_fallocate()
2102 * stack of the hole-punching task: shmem_falloc_waitq
2103 * is usually invalid by the time we reach here, but
2104 * finish_wait() does not dereference it in that case;
2105 * but i_lock is still needed to avoid racing with wake_up_all().
2106 */
2107 spin_lock(&inode->i_lock);
2108 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2109 spin_unlock(&inode->i_lock);
2110
2111 if (fpin)
2112 fput(fpin);
2113 return ret;
2114 }
2115 spin_unlock(&inode->i_lock);
2116 }
2117
2118 sgp = SGP_CACHE;
2119
2120 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2121 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2122 sgp = SGP_NOHUGE;
2123 else if (vma->vm_flags & VM_HUGEPAGE)
2124 sgp = SGP_HUGE;
2125
2126 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2127 gfp, vma, vmf, &ret);
2128 if (err)
2129 return vmf_error(err);
2130 return ret;
2131}
2132
2133unsigned long shmem_get_unmapped_area(struct file *file,
2134 unsigned long uaddr, unsigned long len,
2135 unsigned long pgoff, unsigned long flags)
2136{
2137 unsigned long (*get_area)(struct file *,
2138 unsigned long, unsigned long, unsigned long, unsigned long);
2139 unsigned long addr;
2140 unsigned long offset;
2141 unsigned long inflated_len;
2142 unsigned long inflated_addr;
2143 unsigned long inflated_offset;
2144
2145 if (len > TASK_SIZE)
2146 return -ENOMEM;
2147
2148 get_area = current->mm->get_unmapped_area;
2149 addr = get_area(file, uaddr, len, pgoff, flags);
2150
2151 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2152 return addr;
2153 if (IS_ERR_VALUE(addr))
2154 return addr;
2155 if (addr & ~PAGE_MASK)
2156 return addr;
2157 if (addr > TASK_SIZE - len)
2158 return addr;
2159
2160 if (shmem_huge == SHMEM_HUGE_DENY)
2161 return addr;
2162 if (len < HPAGE_PMD_SIZE)
2163 return addr;
2164 if (flags & MAP_FIXED)
2165 return addr;
2166 /*
2167 * Our priority is to support MAP_SHARED mapped hugely;
2168 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2169 * But if caller specified an address hint and we allocated area there
2170 * successfully, respect that as before.
2171 */
2172 if (uaddr == addr)
2173 return addr;
2174
2175 if (shmem_huge != SHMEM_HUGE_FORCE) {
2176 struct super_block *sb;
2177
2178 if (file) {
2179 VM_BUG_ON(file->f_op != &shmem_file_operations);
2180 sb = file_inode(file)->i_sb;
2181 } else {
2182 /*
2183 * Called directly from mm/mmap.c, or drivers/char/mem.c
2184 * for "/dev/zero", to create a shared anonymous object.
2185 */
2186 if (IS_ERR(shm_mnt))
2187 return addr;
2188 sb = shm_mnt->mnt_sb;
2189 }
2190 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2191 return addr;
2192 }
2193
2194 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2195 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2196 return addr;
2197 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2198 return addr;
2199
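	/*
	 * Ask for a region one PMD larger than needed, so the start can
	 * be rounded up inside it to the same offset modulo HPAGE_PMD_SIZE
	 * as the file offset, allowing huge page mappings.
	 */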
2200 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2201 if (inflated_len > TASK_SIZE)
2202 return addr;
2203 if (inflated_len < len)
2204 return addr;
2205
2206 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2207 if (IS_ERR_VALUE(inflated_addr))
2208 return addr;
2209 if (inflated_addr & ~PAGE_MASK)
2210 return addr;
2211
2212 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2213 inflated_addr += offset - inflated_offset;
2214 if (inflated_offset > offset)
2215 inflated_addr += HPAGE_PMD_SIZE;
2216
2217 if (inflated_addr > TASK_SIZE - len)
2218 return addr;
2219 return inflated_addr;
2220}
2221
2222#ifdef CONFIG_NUMA
2223static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2224{
2225 struct inode *inode = file_inode(vma->vm_file);
2226 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2227}
2228
2229static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2230 unsigned long addr)
2231{
2232 struct inode *inode = file_inode(vma->vm_file);
2233 pgoff_t index;
2234
2235 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2236 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2237}
2238#endif
2239
2240int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2241{
2242 struct inode *inode = file_inode(file);
2243 struct shmem_inode_info *info = SHMEM_I(inode);
2244 int retval = -ENOMEM;
2245
2246 /*
2247 * What serializes the accesses to info->flags?
2248 * ipc_lock_object() when called from shmctl_do_lock(),
2249 * no serialization needed when called from shm_destroy().
2250 */
2251 if (lock && !(info->flags & VM_LOCKED)) {
2252 if (!user_shm_lock(inode->i_size, ucounts))
2253 goto out_nomem;
2254 info->flags |= VM_LOCKED;
2255 mapping_set_unevictable(file->f_mapping);
2256 }
2257 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2258 user_shm_unlock(inode->i_size, ucounts);
2259 info->flags &= ~VM_LOCKED;
2260 mapping_clear_unevictable(file->f_mapping);
2261 }
2262 retval = 0;
2263
2264out_nomem:
2265 return retval;
2266}
2267
2268static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2269{
2270 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2271 int ret;
2272
2273 ret = seal_check_future_write(info->seals, vma);
2274 if (ret)
2275 return ret;
2276
2277 /* arm64 - allow memory tagging on RAM-based files */
2278 vma->vm_flags |= VM_MTE_ALLOWED;
2279
2280 file_accessed(file);
2281 vma->vm_ops = &shmem_vm_ops;
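	/*
	 * Let khugepaged know about the mapping only if it spans at
	 * least one full PMD-aligned huge page.
	 */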
2282 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2283 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2284 (vma->vm_end & HPAGE_PMD_MASK)) {
2285 khugepaged_enter(vma, vma->vm_flags);
2286 }
2287 return 0;
2288}
2289
2290static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2291 umode_t mode, dev_t dev, unsigned long flags)
2292{
2293 struct inode *inode;
2294 struct shmem_inode_info *info;
2295 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2296 ino_t ino;
2297
2298 if (shmem_reserve_inode(sb, &ino))
2299 return NULL;
2300
2301 inode = new_inode(sb);
2302 if (inode) {
2303 inode->i_ino = ino;
2304 inode_init_owner(&init_user_ns, inode, dir, mode);
2305 inode->i_blocks = 0;
2306 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2307 inode->i_generation = prandom_u32();
2308 info = SHMEM_I(inode);
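		/*
		 * Zero only the shmem-specific fields that precede the
		 * embedded vfs_inode, which new_inode() has already set up.
		 */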
2309 memset(info, 0, (char *)inode - (char *)info);
2310 spin_lock_init(&info->lock);
2311 atomic_set(&info->stop_eviction, 0);
2312 info->seals = F_SEAL_SEAL;
2313 info->flags = flags & VM_NORESERVE;
2314 INIT_LIST_HEAD(&info->shrinklist);
2315 INIT_LIST_HEAD(&info->swaplist);
2316 simple_xattrs_init(&info->xattrs);
2317 cache_no_acl(inode);
2318
2319 switch (mode & S_IFMT) {
2320 default:
2321 inode->i_op = &shmem_special_inode_operations;
2322 init_special_inode(inode, mode, dev);
2323 break;
2324 case S_IFREG:
2325 inode->i_mapping->a_ops = &shmem_aops;
2326 inode->i_op = &shmem_inode_operations;
2327 inode->i_fop = &shmem_file_operations;
2328 mpol_shared_policy_init(&info->policy,
2329 shmem_get_sbmpol(sbinfo));
2330 break;
2331 case S_IFDIR:
2332 inc_nlink(inode);
2333 /* Some things misbehave if size == 0 on a directory */
2334 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2335 inode->i_op = &shmem_dir_inode_operations;
2336 inode->i_fop = &simple_dir_operations;
2337 break;
2338 case S_IFLNK:
2339 /*
2340 * Must not load anything in the rbtree,
2341 * mpol_free_shared_policy will not be called.
2342 */
2343 mpol_shared_policy_init(&info->policy, NULL);
2344 break;
2345 }
2346
2347 lockdep_annotate_inode_mutex_key(inode);
2348 } else
2349 shmem_free_inode(sb);
2350 return inode;
2351}
2352
2353#ifdef CONFIG_USERFAULTFD
2354int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2355 pmd_t *dst_pmd,
2356 struct vm_area_struct *dst_vma,
2357 unsigned long dst_addr,
2358 unsigned long src_addr,
2359 bool zeropage,
2360 struct page **pagep)
2361{
2362 struct inode *inode = file_inode(dst_vma->vm_file);
2363 struct shmem_inode_info *info = SHMEM_I(inode);
2364 struct address_space *mapping = inode->i_mapping;
2365 gfp_t gfp = mapping_gfp_mask(mapping);
2366 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2367 void *page_kaddr;
2368 struct page *page;
2369 int ret;
2370 pgoff_t max_off;
2371
2372 if (!shmem_inode_acct_block(inode, 1)) {
2373 /*
2374 * We may have got a page, returned -ENOENT triggering a retry,
2375 * and now we find ourselves with -ENOMEM. Release the page, to
2376 * avoid a BUG_ON in our caller.
2377 */
2378 if (unlikely(*pagep)) {
2379 put_page(*pagep);
2380 *pagep = NULL;
2381 }
2382 return -ENOMEM;
2383 }
2384
2385 if (!*pagep) {
2386 ret = -ENOMEM;
2387 page = shmem_alloc_page(gfp, info, pgoff);
2388 if (!page)
2389 goto out_unacct_blocks;
2390
2391 if (!zeropage) { /* COPY */
2392 page_kaddr = kmap_atomic(page);
2393 ret = copy_from_user(page_kaddr,
2394 (const void __user *)src_addr,
2395 PAGE_SIZE);
2396 kunmap_atomic(page_kaddr);
2397
2398 /* fallback to copy_from_user outside mmap_lock */
2399 if (unlikely(ret)) {
2400 *pagep = page;
2401 ret = -ENOENT;
2402 /* don't free the page */
2403 goto out_unacct_blocks;
2404 }
2405 } else { /* ZEROPAGE */
2406 clear_highpage(page);
2407 }
2408 } else {
2409 page = *pagep;
2410 *pagep = NULL;
2411 }
2412
2413 VM_BUG_ON(PageLocked(page));
2414 VM_BUG_ON(PageSwapBacked(page));
2415 __SetPageLocked(page);
2416 __SetPageSwapBacked(page);
2417 __SetPageUptodate(page);
2418
2419 ret = -EFAULT;
2420 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2421 if (unlikely(pgoff >= max_off))
2422 goto out_release;
2423
2424 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2425 gfp & GFP_RECLAIM_MASK, dst_mm);
2426 if (ret)
2427 goto out_release;
2428
2429 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
2430 page, true, false);
2431 if (ret)
2432 goto out_delete_from_cache;
2433
2434 spin_lock_irq(&info->lock);
2435 info->alloced++;
2436 inode->i_blocks += BLOCKS_PER_PAGE;
2437 shmem_recalc_inode(inode);
2438 spin_unlock_irq(&info->lock);
2439
2440 SetPageDirty(page);
2441 unlock_page(page);
2442 return 0;
2443out_delete_from_cache:
2444 delete_from_page_cache(page);
2445out_release:
2446 unlock_page(page);
2447 put_page(page);
2448out_unacct_blocks:
2449 shmem_inode_unacct_blocks(inode, 1);
2450 return ret;
2451}
2452#endif /* CONFIG_USERFAULTFD */
2453
2454#ifdef CONFIG_TMPFS
2455static const struct inode_operations shmem_symlink_inode_operations;
2456static const struct inode_operations shmem_short_symlink_operations;
2457
2458#ifdef CONFIG_TMPFS_XATTR
2459static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2460#else
2461#define shmem_initxattrs NULL
2462#endif
2463
2464static int
2465shmem_write_begin(struct file *file, struct address_space *mapping,
2466 loff_t pos, unsigned len, unsigned flags,
2467 struct page **pagep, void **fsdata)
2468{
2469 struct inode *inode = mapping->host;
2470 struct shmem_inode_info *info = SHMEM_I(inode);
2471 pgoff_t index = pos >> PAGE_SHIFT;
2472
2473 /* i_mutex is held by caller */
2474 if (unlikely(info->seals & (F_SEAL_GROW |
2475 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2476 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2477 return -EPERM;
2478 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2479 return -EPERM;
2480 }
2481
2482 return shmem_getpage(inode, index, pagep, SGP_WRITE);
2483}
2484
2485static int
2486shmem_write_end(struct file *file, struct address_space *mapping,
2487 loff_t pos, unsigned len, unsigned copied,
2488 struct page *page, void *fsdata)
2489{
2490 struct inode *inode = mapping->host;
2491
2492 if (pos + copied > inode->i_size)
2493 i_size_write(inode, pos + copied);
2494
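	/*
	 * If the copy did not cover the whole (possibly huge) page, zero
	 * the uncopied parts before marking it uptodate, so no stale data
	 * can leak to readers.
	 */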
2495 if (!PageUptodate(page)) {
2496 struct page *head = compound_head(page);
2497 if (PageTransCompound(page)) {
2498 int i;
2499
2500 for (i = 0; i < HPAGE_PMD_NR; i++) {
2501 if (head + i == page)
2502 continue;
2503 clear_highpage(head + i);
2504 flush_dcache_page(head + i);
2505 }
2506 }
2507 if (copied < PAGE_SIZE) {
2508 unsigned from = pos & (PAGE_SIZE - 1);
2509 zero_user_segments(page, 0, from,
2510 from + copied, PAGE_SIZE);
2511 }
2512 SetPageUptodate(head);
2513 }
2514 set_page_dirty(page);
2515 unlock_page(page);
2516 put_page(page);
2517
2518 return copied;
2519}
2520
2521static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2522{
2523 struct file *file = iocb->ki_filp;
2524 struct inode *inode = file_inode(file);
2525 struct address_space *mapping = inode->i_mapping;
2526 pgoff_t index;
2527 unsigned long offset;
2528 enum sgp_type sgp = SGP_READ;
2529 int error = 0;
2530 ssize_t retval = 0;
2531 loff_t *ppos = &iocb->ki_pos;
2532
2533 /*
2534 * Might this read be for a stacking filesystem? Then when reading
2535 * holes of a sparse file, we actually need to allocate those pages,
2536 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2537 */
2538 if (!iter_is_iovec(to))
2539 sgp = SGP_CACHE;
2540
2541 index = *ppos >> PAGE_SHIFT;
2542 offset = *ppos & ~PAGE_MASK;
2543
2544 for (;;) {
2545 struct page *page = NULL;
2546 pgoff_t end_index;
2547 unsigned long nr, ret;
2548 loff_t i_size = i_size_read(inode);
2549
2550 end_index = i_size >> PAGE_SHIFT;
2551 if (index > end_index)
2552 break;
2553 if (index == end_index) {
2554 nr = i_size & ~PAGE_MASK;
2555 if (nr <= offset)
2556 break;
2557 }
2558
2559 error = shmem_getpage(inode, index, &page, sgp);
2560 if (error) {
2561 if (error == -EINVAL)
2562 error = 0;
2563 break;
2564 }
2565 if (page) {
2566 if (sgp == SGP_CACHE)
2567 set_page_dirty(page);
2568 unlock_page(page);
2569 }
2570
2571 /*
2572 * We must re-check i_size after getting the page, since reads
2573 * (unlike writes) are called without i_mutex protection against truncate
2574 */
2575 nr = PAGE_SIZE;
2576 i_size = i_size_read(inode);
2577 end_index = i_size >> PAGE_SHIFT;
2578 if (index == end_index) {
2579 nr = i_size & ~PAGE_MASK;
2580 if (nr <= offset) {
2581 if (page)
2582 put_page(page);
2583 break;
2584 }
2585 }
2586 nr -= offset;
2587
2588 if (page) {
2589 /*
2590 * If users can be writing to this page using arbitrary
2591 * virtual addresses, take care about potential aliasing
2592 * before reading the page on the kernel side.
2593 */
2594 if (mapping_writably_mapped(mapping))
2595 flush_dcache_page(page);
2596 /*
2597 * Mark the page accessed if we read the beginning.
2598 */
2599 if (!offset)
2600 mark_page_accessed(page);
2601 } else {
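			/*
			 * A hole in a sparse file: copy from the shared
			 * zero page rather than allocating memory for it.
			 */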
2602 page = ZERO_PAGE(0);
2603 get_page(page);
2604 }
2605
2606 /*
2607 * Ok, we have the page, and it's up-to-date, so
2608 * now we can copy it to user space...
2609 */
2610 ret = copy_page_to_iter(page, offset, nr, to);
2611 retval += ret;
2612 offset += ret;
2613 index += offset >> PAGE_SHIFT;
2614 offset &= ~PAGE_MASK;
2615
2616 put_page(page);
2617 if (!iov_iter_count(to))
2618 break;
2619 if (ret < nr) {
2620 error = -EFAULT;
2621 break;
2622 }
2623 cond_resched();
2624 }
2625
2626 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2627 file_accessed(file);
2628 return retval ? retval : error;
2629}
2630
2631static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2632{
2633 struct address_space *mapping = file->f_mapping;
2634 struct inode *inode = mapping->host;
2635
2636 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2637 return generic_file_llseek_size(file, offset, whence,
2638 MAX_LFS_FILESIZE, i_size_read(inode));
2639 if (offset < 0)
2640 return -ENXIO;
2641
2642 inode_lock(inode);
2643 /* We're holding i_mutex so we can access i_size directly */
2644 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2645 if (offset >= 0)
2646 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2647 inode_unlock(inode);
2648 return offset;
2649}
2650
2651static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2652 loff_t len)
2653{
2654 struct inode *inode = file_inode(file);
2655 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2656 struct shmem_inode_info *info = SHMEM_I(inode);
2657 struct shmem_falloc shmem_falloc;
2658 pgoff_t start, index, end;
2659 int error;
2660
2661 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2662 return -EOPNOTSUPP;
2663
2664 inode_lock(inode);
2665
2666 if (mode & FALLOC_FL_PUNCH_HOLE) {
2667 struct address_space *mapping = file->f_mapping;
2668 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2669 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2670 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2671
2672 /* protected by i_mutex */
2673 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2674 error = -EPERM;
2675 goto out;
2676 }
2677
2678 shmem_falloc.waitq = &shmem_falloc_waitq;
2679 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2680 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2681 spin_lock(&inode->i_lock);
2682 inode->i_private = &shmem_falloc;
2683 spin_unlock(&inode->i_lock);
2684
2685 if ((u64)unmap_end > (u64)unmap_start)
2686 unmap_mapping_range(mapping, unmap_start,
2687 1 + unmap_end - unmap_start, 0);
2688 shmem_truncate_range(inode, offset, offset + len - 1);
2689 /* No need to unmap again: hole-punching leaves COWed pages */
2690
2691 spin_lock(&inode->i_lock);
2692 inode->i_private = NULL;
2693 wake_up_all(&shmem_falloc_waitq);
2694 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2695 spin_unlock(&inode->i_lock);
2696 error = 0;
2697 goto out;
2698 }
2699
2700 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2701 error = inode_newsize_ok(inode, offset + len);
2702 if (error)
2703 goto out;
2704
2705 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2706 error = -EPERM;
2707 goto out;
2708 }
2709
2710 start = offset >> PAGE_SHIFT;
2711 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2712 /* Try to avoid a swapstorm if len is impossible to satisfy */
2713 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2714 error = -ENOSPC;
2715 goto out;
2716 }
2717
2718 shmem_falloc.waitq = NULL;
2719 shmem_falloc.start = start;
2720 shmem_falloc.next = start;
2721 shmem_falloc.nr_falloced = 0;
2722 shmem_falloc.nr_unswapped = 0;
2723 spin_lock(&inode->i_lock);
2724 inode->i_private = &shmem_falloc;
2725 spin_unlock(&inode->i_lock);
2726
2727 for (index = start; index < end; index++) {
2728 struct page *page;
2729
2730 /*
2731 * Good, the fallocate(2) manpage permits EINTR: we may have
2732 * been interrupted because we are using up too much memory.
2733 */
2734 if (signal_pending(current))
2735 error = -EINTR;
2736 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2737 error = -ENOMEM;
2738 else
2739 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2740 if (error) {
2741 /* Remove the !PageUptodate pages we added */
2742 if (index > start) {
2743 shmem_undo_range(inode,
2744 (loff_t)start << PAGE_SHIFT,
2745 ((loff_t)index << PAGE_SHIFT) - 1, true);
2746 }
2747 goto undone;
2748 }
2749
2750 /*
2751 * Inform shmem_writepage() how far we have reached.
2752 * No need for lock or barrier: we have the page lock.
2753 */
2754 shmem_falloc.next++;
2755 if (!PageUptodate(page))
2756 shmem_falloc.nr_falloced++;
2757
2758 /*
2759 * If !PageUptodate, leave it that way so that freeable pages
2760 * can be recognized if we need to rollback on error later.
2761 * But set_page_dirty so that memory pressure will swap rather
2762 * than free the pages we are allocating (and SGP_CACHE pages
2763 * might still be clean: we now need to mark those dirty too).
2764 */
2765 set_page_dirty(page);
2766 unlock_page(page);
2767 put_page(page);
2768 cond_resched();
2769 }
2770
2771 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2772 i_size_write(inode, offset + len);
2773 inode->i_ctime = current_time(inode);
2774undone:
2775 spin_lock(&inode->i_lock);
2776 inode->i_private = NULL;
2777 spin_unlock(&inode->i_lock);
2778out:
2779 inode_unlock(inode);
2780 return error;
2781}
2782
2783static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2784{
2785 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2786
2787 buf->f_type = TMPFS_MAGIC;
2788 buf->f_bsize = PAGE_SIZE;
2789 buf->f_namelen = NAME_MAX;
2790 if (sbinfo->max_blocks) {
2791 buf->f_blocks = sbinfo->max_blocks;
2792 buf->f_bavail =
2793 buf->f_bfree = sbinfo->max_blocks -
2794 percpu_counter_sum(&sbinfo->used_blocks);
2795 }
2796 if (sbinfo->max_inodes) {
2797 buf->f_files = sbinfo->max_inodes;
2798 buf->f_ffree = sbinfo->free_inodes;
2799 }
2800 /* else leave those fields 0 like simple_statfs */
2801
2802 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2803
2804 return 0;
2805}
2806
2807/*
2808 * File creation. Allocate an inode, and we're done.
2809 */
2810static int
2811shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2812 struct dentry *dentry, umode_t mode, dev_t dev)
2813{
2814 struct inode *inode;
2815 int error = -ENOSPC;
2816
2817 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2818 if (inode) {
2819 error = simple_acl_create(dir, inode);
2820 if (error)
2821 goto out_iput;
2822 error = security_inode_init_security(inode, dir,
2823 &dentry->d_name,
2824 shmem_initxattrs, NULL);
2825 if (error && error != -EOPNOTSUPP)
2826 goto out_iput;
2827
2828 error = 0;
2829 dir->i_size += BOGO_DIRENT_SIZE;
2830 dir->i_ctime = dir->i_mtime = current_time(dir);
2831 d_instantiate(dentry, inode);
2832 dget(dentry); /* Extra count - pin the dentry in core */
2833 }
2834 return error;
2835out_iput:
2836 iput(inode);
2837 return error;
2838}
2839
2840static int
2841shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2842 struct dentry *dentry, umode_t mode)
2843{
2844 struct inode *inode;
2845 int error = -ENOSPC;
2846
2847 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2848 if (inode) {
2849 error = security_inode_init_security(inode, dir,
2850 NULL,
2851 shmem_initxattrs, NULL);
2852 if (error && error != -EOPNOTSUPP)
2853 goto out_iput;
2854 error = simple_acl_create(dir, inode);
2855 if (error)
2856 goto out_iput;
2857 d_tmpfile(dentry, inode);
2858 }
2859 return error;
2860out_iput:
2861 iput(inode);
2862 return error;
2863}
2864
2865static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2866 struct dentry *dentry, umode_t mode)
2867{
2868 int error;
2869
2870 if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2871 mode | S_IFDIR, 0)))
2872 return error;
2873 inc_nlink(dir);
2874 return 0;
2875}
2876
2877static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2878 struct dentry *dentry, umode_t mode, bool excl)
2879{
2880 return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
2881}
2882
2883/*
2884 * Link a file.
2885 */
2886static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2887{
2888 struct inode *inode = d_inode(old_dentry);
2889 int ret = 0;
2890
2891 /*
2892 * No ordinary (disk based) filesystem counts links as inodes;
2893 * but each new link needs a new dentry, pinning lowmem, and
2894 * tmpfs dentries cannot be pruned until they are unlinked.
2895 * But if an O_TMPFILE file is linked into the tmpfs, the
2896 * first link must skip that, to get the accounting right.
2897 */
2898 if (inode->i_nlink) {
2899 ret = shmem_reserve_inode(inode->i_sb, NULL);
2900 if (ret)
2901 goto out;
2902 }
2903
2904 dir->i_size += BOGO_DIRENT_SIZE;
2905 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2906 inc_nlink(inode);
2907 ihold(inode); /* New dentry reference */
2908 dget(dentry); /* Extra pinning count for the created dentry */
2909 d_instantiate(dentry, inode);
2910out:
2911 return ret;
2912}
2913
2914static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2915{
2916 struct inode *inode = d_inode(dentry);
2917
2918 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2919 shmem_free_inode(inode->i_sb);
2920
2921 dir->i_size -= BOGO_DIRENT_SIZE;
2922 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2923 drop_nlink(inode);
2924 dput(dentry); /* Undo the count from "create" - this does all the work */
2925 return 0;
2926}
2927
2928static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2929{
2930 if (!simple_empty(dentry))
2931 return -ENOTEMPTY;
2932
2933 drop_nlink(d_inode(dentry));
2934 drop_nlink(dir);
2935 return shmem_unlink(dir, dentry);
2936}
2937
2938static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2939{
2940 bool old_is_dir = d_is_dir(old_dentry);
2941 bool new_is_dir = d_is_dir(new_dentry);
2942
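	/*
	 * Cross-directory exchange of a directory with a non-directory
	 * moves a ".." reference between the parents, so their link
	 * counts need adjusting.
	 */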
2943 if (old_dir != new_dir && old_is_dir != new_is_dir) {
2944 if (old_is_dir) {
2945 drop_nlink(old_dir);
2946 inc_nlink(new_dir);
2947 } else {
2948 drop_nlink(new_dir);
2949 inc_nlink(old_dir);
2950 }
2951 }
2952 old_dir->i_ctime = old_dir->i_mtime =
2953 new_dir->i_ctime = new_dir->i_mtime =
2954 d_inode(old_dentry)->i_ctime =
2955 d_inode(new_dentry)->i_ctime = current_time(old_dir);
2956
2957 return 0;
2958}
2959
2960static int shmem_whiteout(struct user_namespace *mnt_userns,
2961 struct inode *old_dir, struct dentry *old_dentry)
2962{
2963 struct dentry *whiteout;
2964 int error;
2965
2966 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2967 if (!whiteout)
2968 return -ENOMEM;
2969
2970 error = shmem_mknod(&init_user_ns, old_dir, whiteout,
2971 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2972 dput(whiteout);
2973 if (error)
2974 return error;
2975
2976 /*
2977 * Cheat and hash the whiteout while the old dentry is still in
2978 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2979 *
2980 * d_lookup() will consistently find one of them at this point,
2981 * not sure which one, but that isn't even important.
2982 */
2983 d_rehash(whiteout);
2984 return 0;
2985}
2986
2987/*
2988 * The VFS layer already does all the dentry stuff for rename,
2989 * we just have to decrement the usage count for the target if
2990 * it exists so that the VFS layer correctly frees it when it
2991 * gets overwritten.
2992 */
2993static int shmem_rename2(struct user_namespace *mnt_userns,
2994 struct inode *old_dir, struct dentry *old_dentry,
2995 struct inode *new_dir, struct dentry *new_dentry,
2996 unsigned int flags)
2997{
2998 struct inode *inode = d_inode(old_dentry);
2999 int they_are_dirs = S_ISDIR(inode->i_mode);
3000
3001 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3002 return -EINVAL;
3003
3004 if (flags & RENAME_EXCHANGE)
3005 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3006
3007 if (!simple_empty(new_dentry))
3008 return -ENOTEMPTY;
3009
3010 if (flags & RENAME_WHITEOUT) {
3011 int error;
3012
3013 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
3014 if (error)
3015 return error;
3016 }
3017
3018 if (d_really_is_positive(new_dentry)) {
3019 (void) shmem_unlink(new_dir, new_dentry);
3020 if (they_are_dirs) {
3021 drop_nlink(d_inode(new_dentry));
3022 drop_nlink(old_dir);
3023 }
3024 } else if (they_are_dirs) {
3025 drop_nlink(old_dir);
3026 inc_nlink(new_dir);
3027 }
3028
3029 old_dir->i_size -= BOGO_DIRENT_SIZE;
3030 new_dir->i_size += BOGO_DIRENT_SIZE;
3031 old_dir->i_ctime = old_dir->i_mtime =
3032 new_dir->i_ctime = new_dir->i_mtime =
3033 inode->i_ctime = current_time(old_dir);
3034 return 0;
3035}
3036
3037static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3038 struct dentry *dentry, const char *symname)
3039{
3040 int error;
3041 int len;
3042 struct inode *inode;
3043 struct page *page;
3044
3045 len = strlen(symname) + 1;
3046 if (len > PAGE_SIZE)
3047 return -ENAMETOOLONG;
3048
3049 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3050 VM_NORESERVE);
3051 if (!inode)
3052 return -ENOSPC;
3053
3054 error = security_inode_init_security(inode, dir, &dentry->d_name,
3055 shmem_initxattrs, NULL);
3056 if (error && error != -EOPNOTSUPP) {
3057 iput(inode);
3058 return error;
3059 }
3060
3061 inode->i_size = len-1;
3062 if (len <= SHORT_SYMLINK_LEN) {
3063 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3064 if (!inode->i_link) {
3065 iput(inode);
3066 return -ENOMEM;
3067 }
3068 inode->i_op = &shmem_short_symlink_operations;
3069 } else {
3070 inode_nohighmem(inode);
3071 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3072 if (error) {
3073 iput(inode);
3074 return error;
3075 }
3076 inode->i_mapping->a_ops = &shmem_aops;
3077 inode->i_op = &shmem_symlink_inode_operations;
3078 memcpy(page_address(page), symname, len);
3079 SetPageUptodate(page);
3080 set_page_dirty(page);
3081 unlock_page(page);
3082 put_page(page);
3083 }
3084 dir->i_size += BOGO_DIRENT_SIZE;
3085 dir->i_ctime = dir->i_mtime = current_time(dir);
3086 d_instantiate(dentry, inode);
3087 dget(dentry);
3088 return 0;
3089}
3090
3091static void shmem_put_link(void *arg)
3092{
3093 mark_page_accessed(arg);
3094 put_page(arg);
3095}
3096
3097static const char *shmem_get_link(struct dentry *dentry,
3098 struct inode *inode,
3099 struct delayed_call *done)
3100{
3101 struct page *page = NULL;
3102 int error;
3103 if (!dentry) {
3104 page = find_get_page(inode->i_mapping, 0);
3105 if (!page)
3106 return ERR_PTR(-ECHILD);
3107 if (!PageUptodate(page)) {
3108 put_page(page);
3109 return ERR_PTR(-ECHILD);
3110 }
3111 } else {
3112 error = shmem_getpage(inode, 0, &page, SGP_READ);
3113 if (error)
3114 return ERR_PTR(error);
3115 unlock_page(page);
3116 }
3117 set_delayed_call(done, shmem_put_link, page);
3118 return page_address(page);
3119}
3120
3121#ifdef CONFIG_TMPFS_XATTR
3122/*
3123 * Superblocks without xattr inode operations may get some security.* xattr
3124 * support from the LSM "for free". As soon as we have any other xattrs
3125 * like ACLs, we also need to implement the security.* handlers at
3126 * filesystem level, though.
3127 */
3128
3129/*
3130 * Callback for security_inode_init_security() for acquiring xattrs.
3131 */
3132static int shmem_initxattrs(struct inode *inode,
3133 const struct xattr *xattr_array,
3134 void *fs_info)
3135{
3136 struct shmem_inode_info *info = SHMEM_I(inode);
3137 const struct xattr *xattr;
3138 struct simple_xattr *new_xattr;
3139 size_t len;
3140
3141 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3142 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3143 if (!new_xattr)
3144 return -ENOMEM;
3145
3146 len = strlen(xattr->name) + 1;
3147 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3148 GFP_KERNEL);
3149 if (!new_xattr->name) {
3150 kvfree(new_xattr);
3151 return -ENOMEM;
3152 }
3153
3154 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3155 XATTR_SECURITY_PREFIX_LEN);
3156 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3157 xattr->name, len);
3158
3159 simple_xattr_list_add(&info->xattrs, new_xattr);
3160 }
3161
3162 return 0;
3163}
3164
3165static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3166 struct dentry *unused, struct inode *inode,
3167 const char *name, void *buffer, size_t size)
3168{
3169 struct shmem_inode_info *info = SHMEM_I(inode);
3170
3171 name = xattr_full_name(handler, name);
3172 return simple_xattr_get(&info->xattrs, name, buffer, size);
3173}
3174
3175static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3176 struct user_namespace *mnt_userns,
3177 struct dentry *unused, struct inode *inode,
3178 const char *name, const void *value,
3179 size_t size, int flags)
3180{
3181 struct shmem_inode_info *info = SHMEM_I(inode);
3182
3183 name = xattr_full_name(handler, name);
3184 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3185}
3186
3187static const struct xattr_handler shmem_security_xattr_handler = {
3188 .prefix = XATTR_SECURITY_PREFIX,
3189 .get = shmem_xattr_handler_get,
3190 .set = shmem_xattr_handler_set,
3191};
3192
3193static const struct xattr_handler shmem_trusted_xattr_handler = {
3194 .prefix = XATTR_TRUSTED_PREFIX,
3195 .get = shmem_xattr_handler_get,
3196 .set = shmem_xattr_handler_set,
3197};
3198
3199static const struct xattr_handler *shmem_xattr_handlers[] = {
3200#ifdef CONFIG_TMPFS_POSIX_ACL
3201 &posix_acl_access_xattr_handler,
3202 &posix_acl_default_xattr_handler,
3203#endif
3204 &shmem_security_xattr_handler,
3205 &shmem_trusted_xattr_handler,
3206 NULL
3207};
3208
3209static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3210{
3211 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3212 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3213}
3214#endif /* CONFIG_TMPFS_XATTR */
3215
3216static const struct inode_operations shmem_short_symlink_operations = {
3217 .get_link = simple_get_link,
3218#ifdef CONFIG_TMPFS_XATTR
3219 .listxattr = shmem_listxattr,
3220#endif
3221};
3222
3223static const struct inode_operations shmem_symlink_inode_operations = {
3224 .get_link = shmem_get_link,
3225#ifdef CONFIG_TMPFS_XATTR
3226 .listxattr = shmem_listxattr,
3227#endif
3228};
3229
3230static struct dentry *shmem_get_parent(struct dentry *child)
3231{
3232 return ERR_PTR(-ESTALE);
3233}
3234
3235static int shmem_match(struct inode *ino, void *vfh)
3236{
3237 __u32 *fh = vfh;
3238 __u64 inum = fh[2];
3239 inum = (inum << 32) | fh[1];
3240 return ino->i_ino == inum && fh[0] == ino->i_generation;
3241}
3242
3243/* Find any alias of inode, but prefer a hashed alias */
3244static struct dentry *shmem_find_alias(struct inode *inode)
3245{
3246 struct dentry *alias = d_find_alias(inode);
3247
3248 return alias ?: d_find_any_alias(inode);
3249}
3250
3251
3252static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3253 struct fid *fid, int fh_len, int fh_type)
3254{
3255 struct inode *inode;
3256 struct dentry *dentry = NULL;
3257 u64 inum;
3258
3259 if (fh_len < 3)
3260 return NULL;
3261
3262 inum = fid->raw[2];
3263 inum = (inum << 32) | fid->raw[1];
3264
3265 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3266 shmem_match, fid->raw);
3267 if (inode) {
3268 dentry = shmem_find_alias(inode);
3269 iput(inode);
3270 }
3271
3272 return dentry;
3273}
3274
3275static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3276 struct inode *parent)
3277{
3278 if (*len < 3) {
3279 *len = 3;
3280 return FILEID_INVALID;
3281 }
3282
3283 if (inode_unhashed(inode)) {
3284 /* Unfortunately insert_inode_hash is not idempotent,
3285 * so as we hash inodes here rather than at creation
3286 * time, we need a lock to ensure we only try
3287 * to do it once
3288 */
3289 static DEFINE_SPINLOCK(lock);
3290 spin_lock(&lock);
3291 if (inode_unhashed(inode))
3292 __insert_inode_hash(inode,
3293 inode->i_ino + inode->i_generation);
3294 spin_unlock(&lock);
3295 }
3296
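	/*
	 * File handle layout: generation, then the inode number split
	 * into its low and high 32-bit halves.
	 */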
3297 fh[0] = inode->i_generation;
3298 fh[1] = inode->i_ino;
3299 fh[2] = ((__u64)inode->i_ino) >> 32;
3300
3301 *len = 3;
3302 return 1;
3303}
3304
3305static const struct export_operations shmem_export_ops = {
3306 .get_parent = shmem_get_parent,
3307 .encode_fh = shmem_encode_fh,
3308 .fh_to_dentry = shmem_fh_to_dentry,
3309};
3310
3311enum shmem_param {
3312 Opt_gid,
3313 Opt_huge,
3314 Opt_mode,
3315 Opt_mpol,
3316 Opt_nr_blocks,
3317 Opt_nr_inodes,
3318 Opt_size,
3319 Opt_uid,
3320 Opt_inode32,
3321 Opt_inode64,
3322};
3323
3324static const struct constant_table shmem_param_enums_huge[] = {
3325 {"never", SHMEM_HUGE_NEVER },
3326 {"always", SHMEM_HUGE_ALWAYS },
3327 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3328 {"advise", SHMEM_HUGE_ADVISE },
3329 {}
3330};
3331
3332const struct fs_parameter_spec shmem_fs_parameters[] = {
3333 fsparam_u32 ("gid", Opt_gid),
3334 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3335 fsparam_u32oct("mode", Opt_mode),
3336 fsparam_string("mpol", Opt_mpol),
3337 fsparam_string("nr_blocks", Opt_nr_blocks),
3338 fsparam_string("nr_inodes", Opt_nr_inodes),
3339 fsparam_string("size", Opt_size),
3340 fsparam_u32 ("uid", Opt_uid),
3341 fsparam_flag ("inode32", Opt_inode32),
3342 fsparam_flag ("inode64", Opt_inode64),
3343 {}
3344};
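/*
 * Illustrative example (not part of this file): these parameters back
 * tmpfs mounts such as
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,huge=within_size,uid=1000 tmpfs /mnt
 */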
3345
3346static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3347{
3348 struct shmem_options *ctx = fc->fs_private;
3349 struct fs_parse_result result;
3350 unsigned long long size;
3351 char *rest;
3352 int opt;
3353
3354 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3355 if (opt < 0)
3356 return opt;
3357
3358 switch (opt) {
3359 case Opt_size:
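		/* memparse() handles K/M/G suffixes; a trailing '%' means a percentage of total RAM */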
3360 size = memparse(param->string, &rest);
3361 if (*rest == '%') {
3362 size <<= PAGE_SHIFT;
3363 size *= totalram_pages();
3364 do_div(size, 100);
3365 rest++;
3366 }
3367 if (*rest)
3368 goto bad_value;
3369 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3370 ctx->seen |= SHMEM_SEEN_BLOCKS;
3371 break;
3372 case Opt_nr_blocks:
3373 ctx->blocks = memparse(param->string, &rest);
3374 if (*rest)
3375 goto bad_value;
3376 ctx->seen |= SHMEM_SEEN_BLOCKS;
3377 break;
3378 case Opt_nr_inodes:
3379 ctx->inodes = memparse(param->string, &rest);
3380 if (*rest)
3381 goto bad_value;
3382 ctx->seen |= SHMEM_SEEN_INODES;
3383 break;
3384 case Opt_mode:
3385 ctx->mode = result.uint_32 & 07777;
3386 break;
3387 case Opt_uid:
3388 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3389 if (!uid_valid(ctx->uid))
3390 goto bad_value;
3391 break;
3392 case Opt_gid:
3393 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3394 if (!gid_valid(ctx->gid))
3395 goto bad_value;
3396 break;
3397 case Opt_huge:
3398 ctx->huge = result.uint_32;
3399 if (ctx->huge != SHMEM_HUGE_NEVER &&
3400 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3401 has_transparent_hugepage()))
3402 goto unsupported_parameter;
3403 ctx->seen |= SHMEM_SEEN_HUGE;
3404 break;
3405 case Opt_mpol:
3406 if (IS_ENABLED(CONFIG_NUMA)) {
3407 mpol_put(ctx->mpol);
3408 ctx->mpol = NULL;
3409 if (mpol_parse_str(param->string, &ctx->mpol))
3410 goto bad_value;
3411 break;
3412 }
3413 goto unsupported_parameter;
3414 case Opt_inode32:
3415 ctx->full_inums = false;
3416 ctx->seen |= SHMEM_SEEN_INUMS;
3417 break;
3418 case Opt_inode64:
3419 if (sizeof(ino_t) < 8) {
3420 return invalfc(fc,
3421				"Cannot use inode64 with <64bit inums in kernel");
3422 }
3423 ctx->full_inums = true;
3424 ctx->seen |= SHMEM_SEEN_INUMS;
3425 break;
3426 }
3427 return 0;
3428
3429unsupported_parameter:
3430 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3431bad_value:
3432 return invalfc(fc, "Bad value for '%s'", param->key);
3433}
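
/*
 * Worked example for the Opt_size percentage path above, assuming 4KiB
 * pages and 8GiB of RAM (2097152 pages): "size=50%" has memparse() return
 * 50, which becomes 50 << PAGE_SHIFT = 204800, times totalram_pages() =
 * 429496729600, divided by 100 = 4294967296 bytes; ctx->blocks then ends
 * up as DIV_ROUND_UP(4GiB, PAGE_SIZE) = 1048576 pages, i.e. half of RAM.
 */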
3434
3435static int shmem_parse_options(struct fs_context *fc, void *data)
3436{
3437 char *options = data;
3438
3439 if (options) {
3440 int err = security_sb_eat_lsm_opts(options, &fc->security);
3441 if (err)
3442 return err;
3443 }
3444
3445 while (options != NULL) {
3446 char *this_char = options;
3447 for (;;) {
3448 /*
3449 * NUL-terminate this option: unfortunately,
3450 * mount options form a comma-separated list,
3451 * but mpol's nodelist may also contain commas.
3452 */
3453 options = strchr(options, ',');
3454 if (options == NULL)
3455 break;
3456 options++;
3457 if (!isdigit(*options)) {
3458 options[-1] = '\0';
3459 break;
3460 }
3461 }
3462 if (*this_char) {
3463 char *value = strchr(this_char, '=');
3464 size_t len = 0;
3465 int err;
3466
3467 if (value) {
3468 *value++ = '\0';
3469 len = strlen(value);
3470 }
3471 err = vfs_parse_fs_string(fc, this_char, value, len);
3472 if (err < 0)
3473 return err;
3474 }
3475 }
3476 return 0;
3477}
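
/*
 * Illustrative example of the splitting above: for the monolithic string
 * "mpol=bind:0-3,5,size=1g", the comma before '5' is kept (the next
 * character is a digit, so it is treated as part of the mpol nodelist)
 * and only the comma before "size" is turned into a NUL, handing the two
 * parameters "mpol=bind:0-3,5" and "size=1g" to vfs_parse_fs_string().
 */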
3478
3479/*
3480 * Reconfigure a shmem filesystem.
3481 *
3482 * Note that we disallow change from limited->unlimited blocks/inodes while any
3483 * are in use; but we must separately disallow unlimited->limited, because in
3484 * that case we have no record of how much is already in use.
3485 */
3486static int shmem_reconfigure(struct fs_context *fc)
3487{
3488 struct shmem_options *ctx = fc->fs_private;
3489 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3490 unsigned long inodes;
3491 const char *err;
3492
3493 spin_lock(&sbinfo->stat_lock);
3494 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3495 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3496 if (!sbinfo->max_blocks) {
3497 err = "Cannot retroactively limit size";
3498 goto out;
3499 }
3500 if (percpu_counter_compare(&sbinfo->used_blocks,
3501 ctx->blocks) > 0) {
3502 err = "Too small a size for current use";
3503 goto out;
3504 }
3505 }
3506 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3507 if (!sbinfo->max_inodes) {
3508 err = "Cannot retroactively limit inodes";
3509 goto out;
3510 }
3511 if (ctx->inodes < inodes) {
3512 err = "Too few inodes for current use";
3513 goto out;
3514 }
3515 }
3516
3517 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3518 sbinfo->next_ino > UINT_MAX) {
3519 err = "Current inum too high to switch to 32-bit inums";
3520 goto out;
3521 }
3522
3523 if (ctx->seen & SHMEM_SEEN_HUGE)
3524 sbinfo->huge = ctx->huge;
3525 if (ctx->seen & SHMEM_SEEN_INUMS)
3526 sbinfo->full_inums = ctx->full_inums;
3527 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3528 sbinfo->max_blocks = ctx->blocks;
3529 if (ctx->seen & SHMEM_SEEN_INODES) {
3530 sbinfo->max_inodes = ctx->inodes;
3531 sbinfo->free_inodes = ctx->inodes - inodes;
3532 }
3533
3534 /*
3535 * Preserve previous mempolicy unless mpol remount option was specified.
3536 */
3537 if (ctx->mpol) {
3538 mpol_put(sbinfo->mpol);
3539 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
3540 ctx->mpol = NULL;
3541 }
3542 spin_unlock(&sbinfo->stat_lock);
3543 return 0;
3544out:
3545 spin_unlock(&sbinfo->stat_lock);
3546 return invalfc(fc, "%s", err);
3547}
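
/*
 * Example (illustrative): "mount -o remount,size=2g /dev/shm" arrives here
 * with SHMEM_SEEN_BLOCKS set, and succeeds only if the mount was already
 * size-limited and its current usage fits within the new limit, per the
 * checks above.
 */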
3548
3549static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3550{
3551 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3552
3553 if (sbinfo->max_blocks != shmem_default_max_blocks())
3554 seq_printf(seq, ",size=%luk",
3555 sbinfo->max_blocks << (PAGE_SHIFT - 10));
3556 if (sbinfo->max_inodes != shmem_default_max_inodes())
3557 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3558 if (sbinfo->mode != (0777 | S_ISVTX))
3559 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3560 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3561 seq_printf(seq, ",uid=%u",
3562 from_kuid_munged(&init_user_ns, sbinfo->uid));
3563 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3564 seq_printf(seq, ",gid=%u",
3565 from_kgid_munged(&init_user_ns, sbinfo->gid));
3566
3567 /*
3568 * Showing inode{64,32} might be useful even if it's the system default,
3569	 * since then people don't have to resort to checking both here and
3570	 * /proc/config.gz (which may not even exist if IKCONFIG_PROC isn't
3571	 * enabled) to confirm 64-bit inums were successfully applied.
3572 *
3573 * We hide it when inode64 isn't the default and we are using 32-bit
3574 * inodes, since that probably just means the feature isn't even under
3575 * consideration.
3576 *
3577 * As such:
3578 *
3579	 *                     +-----------------+-----------------+
3580	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3581	 *  +------------------+-----------------+-----------------+
3582	 *  | full_inums=true  | show            | show            |
3583	 *  | full_inums=false | show            | hide            |
3584	 *  +------------------+-----------------+-----------------+
3585 *
3586 */
3587 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3588 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3589#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3590 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3591 if (sbinfo->huge)
3592 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3593#endif
3594 shmem_show_mpol(seq, sbinfo->mpol);
3595 return 0;
3596}
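
/*
 * Illustrative /proc/mounts line produced with the help of the function
 * above for a non-default mount (the rw,relatime part comes from generic
 * mount flags, not from here; exact values depend on the options used):
 *
 *	tmpfs /mnt tmpfs rw,relatime,size=2097152k,uid=1000,huge=within_size 0 0
 */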
3597
3598#endif /* CONFIG_TMPFS */
3599
3600static void shmem_put_super(struct super_block *sb)
3601{
3602 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3603
3604 free_percpu(sbinfo->ino_batch);
3605 percpu_counter_destroy(&sbinfo->used_blocks);
3606 mpol_put(sbinfo->mpol);
3607 kfree(sbinfo);
3608 sb->s_fs_info = NULL;
3609}
3610
3611static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3612{
3613 struct shmem_options *ctx = fc->fs_private;
3614 struct inode *inode;
3615 struct shmem_sb_info *sbinfo;
3616 int err = -ENOMEM;
3617
3618 /* Round up to L1_CACHE_BYTES to resist false sharing */
3619 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3620 L1_CACHE_BYTES), GFP_KERNEL);
3621 if (!sbinfo)
3622 return -ENOMEM;
3623
3624 sb->s_fs_info = sbinfo;
3625
3626#ifdef CONFIG_TMPFS
3627 /*
3628	 * By default we only allow half of the physical RAM per
3629 * tmpfs instance, limiting inodes to one per page of lowmem;
3630 * but the internal instance is left unlimited.
3631 */
3632 if (!(sb->s_flags & SB_KERNMOUNT)) {
3633 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3634 ctx->blocks = shmem_default_max_blocks();
3635 if (!(ctx->seen & SHMEM_SEEN_INODES))
3636 ctx->inodes = shmem_default_max_inodes();
3637 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3638 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3639 } else {
3640 sb->s_flags |= SB_NOUSER;
3641 }
3642 sb->s_export_op = &shmem_export_ops;
3643 sb->s_flags |= SB_NOSEC;
3644#else
3645 sb->s_flags |= SB_NOUSER;
3646#endif
3647 sbinfo->max_blocks = ctx->blocks;
3648 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3649 if (sb->s_flags & SB_KERNMOUNT) {
3650 sbinfo->ino_batch = alloc_percpu(ino_t);
3651 if (!sbinfo->ino_batch)
3652 goto failed;
3653 }
3654 sbinfo->uid = ctx->uid;
3655 sbinfo->gid = ctx->gid;
3656 sbinfo->full_inums = ctx->full_inums;
3657 sbinfo->mode = ctx->mode;
3658 sbinfo->huge = ctx->huge;
3659 sbinfo->mpol = ctx->mpol;
3660 ctx->mpol = NULL;
3661
3662 spin_lock_init(&sbinfo->stat_lock);
3663 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3664 goto failed;
3665 spin_lock_init(&sbinfo->shrinklist_lock);
3666 INIT_LIST_HEAD(&sbinfo->shrinklist);
3667
3668 sb->s_maxbytes = MAX_LFS_FILESIZE;
3669 sb->s_blocksize = PAGE_SIZE;
3670 sb->s_blocksize_bits = PAGE_SHIFT;
3671 sb->s_magic = TMPFS_MAGIC;
3672 sb->s_op = &shmem_ops;
3673 sb->s_time_gran = 1;
3674#ifdef CONFIG_TMPFS_XATTR
3675 sb->s_xattr = shmem_xattr_handlers;
3676#endif
3677#ifdef CONFIG_TMPFS_POSIX_ACL
3678 sb->s_flags |= SB_POSIXACL;
3679#endif
3680 uuid_gen(&sb->s_uuid);
3681
3682 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3683 if (!inode)
3684 goto failed;
3685 inode->i_uid = sbinfo->uid;
3686 inode->i_gid = sbinfo->gid;
3687 sb->s_root = d_make_root(inode);
3688 if (!sb->s_root)
3689 goto failed;
3690 return 0;
3691
3692failed:
3693 shmem_put_super(sb);
3694 return err;
3695}
3696
3697static int shmem_get_tree(struct fs_context *fc)
3698{
3699 return get_tree_nodev(fc, shmem_fill_super);
3700}
3701
3702static void shmem_free_fc(struct fs_context *fc)
3703{
3704 struct shmem_options *ctx = fc->fs_private;
3705
3706 if (ctx) {
3707 mpol_put(ctx->mpol);
3708 kfree(ctx);
3709 }
3710}
3711
3712static const struct fs_context_operations shmem_fs_context_ops = {
3713 .free = shmem_free_fc,
3714 .get_tree = shmem_get_tree,
3715#ifdef CONFIG_TMPFS
3716 .parse_monolithic = shmem_parse_options,
3717 .parse_param = shmem_parse_one,
3718 .reconfigure = shmem_reconfigure,
3719#endif
3720};
3721
3722static struct kmem_cache *shmem_inode_cachep;
3723
3724static struct inode *shmem_alloc_inode(struct super_block *sb)
3725{
3726 struct shmem_inode_info *info;
3727 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3728 if (!info)
3729 return NULL;
3730 return &info->vfs_inode;
3731}
3732
3733static void shmem_free_in_core_inode(struct inode *inode)
3734{
3735 if (S_ISLNK(inode->i_mode))
3736 kfree(inode->i_link);
3737 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3738}
3739
3740static void shmem_destroy_inode(struct inode *inode)
3741{
3742 if (S_ISREG(inode->i_mode))
3743 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3744}
3745
3746static void shmem_init_inode(void *foo)
3747{
3748 struct shmem_inode_info *info = foo;
3749 inode_init_once(&info->vfs_inode);
3750}
3751
3752static void shmem_init_inodecache(void)
3753{
3754 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3755 sizeof(struct shmem_inode_info),
3756 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3757}
3758
3759static void shmem_destroy_inodecache(void)
3760{
3761 kmem_cache_destroy(shmem_inode_cachep);
3762}
3763
3764const struct address_space_operations shmem_aops = {
3765 .writepage = shmem_writepage,
3766 .set_page_dirty = __set_page_dirty_no_writeback,
3767#ifdef CONFIG_TMPFS
3768 .write_begin = shmem_write_begin,
3769 .write_end = shmem_write_end,
3770#endif
3771#ifdef CONFIG_MIGRATION
3772 .migratepage = migrate_page,
3773#endif
3774 .error_remove_page = generic_error_remove_page,
3775};
3776EXPORT_SYMBOL(shmem_aops);
3777
3778static const struct file_operations shmem_file_operations = {
3779 .mmap = shmem_mmap,
3780 .get_unmapped_area = shmem_get_unmapped_area,
3781#ifdef CONFIG_TMPFS
3782 .llseek = shmem_file_llseek,
3783 .read_iter = shmem_file_read_iter,
3784 .write_iter = generic_file_write_iter,
3785 .fsync = noop_fsync,
3786 .splice_read = generic_file_splice_read,
3787 .splice_write = iter_file_splice_write,
3788 .fallocate = shmem_fallocate,
3789#endif
3790};
3791
3792static const struct inode_operations shmem_inode_operations = {
3793 .getattr = shmem_getattr,
3794 .setattr = shmem_setattr,
3795#ifdef CONFIG_TMPFS_XATTR
3796 .listxattr = shmem_listxattr,
3797 .set_acl = simple_set_acl,
3798#endif
3799};
3800
3801static const struct inode_operations shmem_dir_inode_operations = {
3802#ifdef CONFIG_TMPFS
3803 .create = shmem_create,
3804 .lookup = simple_lookup,
3805 .link = shmem_link,
3806 .unlink = shmem_unlink,
3807 .symlink = shmem_symlink,
3808 .mkdir = shmem_mkdir,
3809 .rmdir = shmem_rmdir,
3810 .mknod = shmem_mknod,
3811 .rename = shmem_rename2,
3812 .tmpfile = shmem_tmpfile,
3813#endif
3814#ifdef CONFIG_TMPFS_XATTR
3815 .listxattr = shmem_listxattr,
3816#endif
3817#ifdef CONFIG_TMPFS_POSIX_ACL
3818 .setattr = shmem_setattr,
3819 .set_acl = simple_set_acl,
3820#endif
3821};
3822
3823static const struct inode_operations shmem_special_inode_operations = {
3824#ifdef CONFIG_TMPFS_XATTR
3825 .listxattr = shmem_listxattr,
3826#endif
3827#ifdef CONFIG_TMPFS_POSIX_ACL
3828 .setattr = shmem_setattr,
3829 .set_acl = simple_set_acl,
3830#endif
3831};
3832
3833static const struct super_operations shmem_ops = {
3834 .alloc_inode = shmem_alloc_inode,
3835 .free_inode = shmem_free_in_core_inode,
3836 .destroy_inode = shmem_destroy_inode,
3837#ifdef CONFIG_TMPFS
3838 .statfs = shmem_statfs,
3839 .show_options = shmem_show_options,
3840#endif
3841 .evict_inode = shmem_evict_inode,
3842 .drop_inode = generic_delete_inode,
3843 .put_super = shmem_put_super,
3844#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3845 .nr_cached_objects = shmem_unused_huge_count,
3846 .free_cached_objects = shmem_unused_huge_scan,
3847#endif
3848};
3849
3850static const struct vm_operations_struct shmem_vm_ops = {
3851 .fault = shmem_fault,
3852 .map_pages = filemap_map_pages,
3853#ifdef CONFIG_NUMA
3854 .set_policy = shmem_set_policy,
3855 .get_policy = shmem_get_policy,
3856#endif
3857};
3858
3859int shmem_init_fs_context(struct fs_context *fc)
3860{
3861 struct shmem_options *ctx;
3862
3863 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3864 if (!ctx)
3865 return -ENOMEM;
3866
3867 ctx->mode = 0777 | S_ISVTX;
3868 ctx->uid = current_fsuid();
3869 ctx->gid = current_fsgid();
3870
3871 fc->fs_private = ctx;
3872 fc->ops = &shmem_fs_context_ops;
3873 return 0;
3874}
3875
3876static struct file_system_type shmem_fs_type = {
3877 .owner = THIS_MODULE,
3878 .name = "tmpfs",
3879 .init_fs_context = shmem_init_fs_context,
3880#ifdef CONFIG_TMPFS
3881 .parameters = shmem_fs_parameters,
3882#endif
3883 .kill_sb = kill_litter_super,
3884 .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT,
3885};
3886
3887int __init shmem_init(void)
3888{
3889 int error;
3890
3891 shmem_init_inodecache();
3892
3893 error = register_filesystem(&shmem_fs_type);
3894 if (error) {
3895 pr_err("Could not register tmpfs\n");
3896 goto out2;
3897 }
3898
3899 shm_mnt = kern_mount(&shmem_fs_type);
3900 if (IS_ERR(shm_mnt)) {
3901 error = PTR_ERR(shm_mnt);
3902 pr_err("Could not kern_mount tmpfs\n");
3903 goto out1;
3904 }
3905
3906#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3907 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3908 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3909 else
3910 shmem_huge = 0; /* just in case it was patched */
3911#endif
3912 return 0;
3913
3914out1:
3915 unregister_filesystem(&shmem_fs_type);
3916out2:
3917 shmem_destroy_inodecache();
3918 shm_mnt = ERR_PTR(error);
3919 return error;
3920}
3921
3922#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
3923static ssize_t shmem_enabled_show(struct kobject *kobj,
3924 struct kobj_attribute *attr, char *buf)
3925{
3926 static const int values[] = {
3927 SHMEM_HUGE_ALWAYS,
3928 SHMEM_HUGE_WITHIN_SIZE,
3929 SHMEM_HUGE_ADVISE,
3930 SHMEM_HUGE_NEVER,
3931 SHMEM_HUGE_DENY,
3932 SHMEM_HUGE_FORCE,
3933 };
3934 int len = 0;
3935 int i;
3936
3937 for (i = 0; i < ARRAY_SIZE(values); i++) {
3938 len += sysfs_emit_at(buf, len,
3939 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
3940 i ? " " : "",
3941 shmem_format_huge(values[i]));
3942 }
3943
3944 len += sysfs_emit_at(buf, len, "\n");
3945
3946 return len;
3947}
3948
3949static ssize_t shmem_enabled_store(struct kobject *kobj,
3950 struct kobj_attribute *attr, const char *buf, size_t count)
3951{
3952 char tmp[16];
3953 int huge;
3954
3955 if (count + 1 > sizeof(tmp))
3956 return -EINVAL;
3957 memcpy(tmp, buf, count);
3958 tmp[count] = '\0';
3959 if (count && tmp[count - 1] == '\n')
3960 tmp[count - 1] = '\0';
3961
3962 huge = shmem_parse_huge(tmp);
3963 if (huge == -EINVAL)
3964 return -EINVAL;
3965 if (!has_transparent_hugepage() &&
3966 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3967 return -EINVAL;
3968
3969 shmem_huge = huge;
3970 if (shmem_huge > SHMEM_HUGE_DENY)
3971 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3972 return count;
3973}
3974
3975struct kobj_attribute shmem_enabled_attr =
3976 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3977#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
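
/*
 * The attribute above appears under /sys/kernel/mm/transparent_hugepage/
 * as "shmem_enabled".  For example, writing "within_size" to it updates
 * shmem_huge through shmem_enabled_store(), and reading it back shows the
 * current choice in brackets: "always [within_size] advise never deny force".
 */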
3978
3979#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3980bool shmem_huge_enabled(struct vm_area_struct *vma)
3981{
3982 struct inode *inode = file_inode(vma->vm_file);
3983 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3984 loff_t i_size;
3985 pgoff_t off;
3986
3987 if (!transhuge_vma_enabled(vma, vma->vm_flags))
3988 return false;
3989 if (shmem_huge == SHMEM_HUGE_FORCE)
3990 return true;
3991 if (shmem_huge == SHMEM_HUGE_DENY)
3992 return false;
3993 switch (sbinfo->huge) {
3994 case SHMEM_HUGE_NEVER:
3995 return false;
3996 case SHMEM_HUGE_ALWAYS:
3997 return true;
3998 case SHMEM_HUGE_WITHIN_SIZE:
3999 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4000 i_size = round_up(i_size_read(inode), PAGE_SIZE);
4001 if (i_size >= HPAGE_PMD_SIZE &&
4002 i_size >> PAGE_SHIFT >= off)
4003 return true;
4004 fallthrough;
4005 case SHMEM_HUGE_ADVISE:
4006 /* TODO: implement fadvise() hints */
4007 return (vma->vm_flags & VM_HUGEPAGE);
4008 default:
4009 VM_BUG_ON(1);
4010 return false;
4011 }
4012}
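
/*
 * Worked example for the SHMEM_HUGE_WITHIN_SIZE case above, assuming 4KiB
 * pages and 2MiB PMD-sized huge pages: for a mapping at vm_pgoff 0 of a
 * file whose i_size is 3MiB, off rounds up to 0 and i_size rounds up to
 * 3MiB, so 3MiB >= HPAGE_PMD_SIZE and (3MiB >> PAGE_SHIFT) = 768 >= 0
 * allow huge pages; a 1MiB file fails the size test and falls through to
 * the SHMEM_HUGE_ADVISE handling (VM_HUGEPAGE on the vma).
 */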
4013#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4014
4015#else /* !CONFIG_SHMEM */
4016
4017/*
4018 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4019 *
4020 * This is intended for small systems where the benefits of the full
4021 * shmem code (swap-backed and resource-limited) are outweighed by
4022 * its complexity. On systems without swap this code should be
4023 * effectively equivalent, but much lighter weight.
4024 */
4025
4026static struct file_system_type shmem_fs_type = {
4027 .name = "tmpfs",
4028 .init_fs_context = ramfs_init_fs_context,
4029 .parameters = ramfs_fs_parameters,
4030 .kill_sb = kill_litter_super,
4031 .fs_flags = FS_USERNS_MOUNT,
4032};
4033
4034int __init shmem_init(void)
4035{
4036 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4037
4038 shm_mnt = kern_mount(&shmem_fs_type);
4039 BUG_ON(IS_ERR(shm_mnt));
4040
4041 return 0;
4042}
4043
4044int shmem_unuse(unsigned int type, bool frontswap,
4045 unsigned long *fs_pages_to_unuse)
4046{
4047 return 0;
4048}
4049
4050int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4051{
4052 return 0;
4053}
4054
4055void shmem_unlock_mapping(struct address_space *mapping)
4056{
4057}
4058
4059#ifdef CONFIG_MMU
4060unsigned long shmem_get_unmapped_area(struct file *file,
4061 unsigned long addr, unsigned long len,
4062 unsigned long pgoff, unsigned long flags)
4063{
4064 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4065}
4066#endif
4067
4068void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4069{
4070 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4071}
4072EXPORT_SYMBOL_GPL(shmem_truncate_range);
4073
4074#define shmem_vm_ops generic_file_vm_ops
4075#define shmem_file_operations ramfs_file_operations
4076#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4077#define shmem_acct_size(flags, size) 0
4078#define shmem_unacct_size(flags, size) do {} while (0)
4079
4080#endif /* CONFIG_SHMEM */
4081
4082/* common code */
4083
4084static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4085 unsigned long flags, unsigned int i_flags)
4086{
4087 struct inode *inode;
4088 struct file *res;
4089
4090 if (IS_ERR(mnt))
4091 return ERR_CAST(mnt);
4092
4093 if (size < 0 || size > MAX_LFS_FILESIZE)
4094 return ERR_PTR(-EINVAL);
4095
4096 if (shmem_acct_size(flags, size))
4097 return ERR_PTR(-ENOMEM);
4098
4099 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4100 flags);
4101 if (unlikely(!inode)) {
4102 shmem_unacct_size(flags, size);
4103 return ERR_PTR(-ENOSPC);
4104 }
4105 inode->i_flags |= i_flags;
4106 inode->i_size = size;
4107 clear_nlink(inode); /* It is unlinked */
4108 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4109 if (!IS_ERR(res))
4110 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4111 &shmem_file_operations);
4112 if (IS_ERR(res))
4113 iput(inode);
4114 return res;
4115}
4116
4117/**
4118 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4119 * kernel internal. There will be NO LSM permission checks against the
4120 * underlying inode. So users of this interface must do LSM checks at a
4121 * higher layer. The users are the big_key and shm implementations. LSM
4122 * checks are provided at the key or shm level rather than the inode.
4123 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4124 * @size: size to be set for the file
4125 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4126 */
4127struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4128{
4129 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4130}
4131
4132/**
4133 * shmem_file_setup - get an unlinked file living in tmpfs
4134 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4135 * @size: size to be set for the file
4136 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4137 */
4138struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4139{
4140 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4141}
4142EXPORT_SYMBOL_GPL(shmem_file_setup);
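
/*
 * Minimal in-kernel usage sketch (illustrative only; the name and size are
 * made up, and error handling is trimmed): a caller wanting an unlinked,
 * swap-backed object of one megabyte could do
 *
 *	struct file *file = shmem_file_setup("my-buffer", SZ_1M, VM_NORESERVE);
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use file->f_mapping, mmap it into a vma, etc ...
 *	fput(file);
 *
 * memfd_create() and the drm_gem object code are among the real callers.
 */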
4143
4144/**
4145 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4146 * @mnt: the tmpfs mount where the file will be created
4147 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4148 * @size: size to be set for the file
4149 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4150 */
4151struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4152 loff_t size, unsigned long flags)
4153{
4154 return __shmem_file_setup(mnt, name, size, flags, 0);
4155}
4156EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4157
4158/**
4159 * shmem_zero_setup - setup a shared anonymous mapping
4160 * @vma: the vma to be mmapped, as prepared by do_mmap
4161 */
4162int shmem_zero_setup(struct vm_area_struct *vma)
4163{
4164 struct file *file;
4165 loff_t size = vma->vm_end - vma->vm_start;
4166
4167 /*
4168 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4169 * between XFS directory reading and selinux: since this file is only
4170 * accessible to the user through its mapping, use S_PRIVATE flag to
4171 * bypass file security, in the same way as shmem_kernel_file_setup().
4172 */
4173 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4174 if (IS_ERR(file))
4175 return PTR_ERR(file);
4176
4177 if (vma->vm_file)
4178 fput(vma->vm_file);
4179 vma->vm_file = file;
4180 vma->vm_ops = &shmem_vm_ops;
4181
4182 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4183 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4184 (vma->vm_end & HPAGE_PMD_MASK)) {
4185 khugepaged_enter(vma, vma->vm_flags);
4186 }
4187
4188 return 0;
4189}
4190
4191/**
4192 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4193 * @mapping: the page's address_space
4194 * @index: the page index
4195 * @gfp: the page allocator flags to use if allocating
4196 *
4197 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4198 * with any new page allocations done using the specified allocation flags.
4199 * But read_cache_page_gfp() uses the ->readpage() method, which does not
4200 * suit tmpfs, since it may have pages in swapcache and needs to find those
4201 * for itself; drivers such as drm's i915 and ttm use this helper instead.
4202 *
4203 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4204 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4205 */
4206struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4207 pgoff_t index, gfp_t gfp)
4208{
4209#ifdef CONFIG_SHMEM
4210 struct inode *inode = mapping->host;
4211 struct page *page;
4212 int error;
4213
4214 BUG_ON(!shmem_mapping(mapping));
4215 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4216 gfp, NULL, NULL, NULL);
4217 if (error)
4218 page = ERR_PTR(error);
4219 else
4220 unlock_page(page);
4221 return page;
4222#else
4223 /*
4224 * The tiny !SHMEM case uses ramfs without swap
4225 */
4226 return read_cache_page_gfp(mapping, index, gfp);
4227#endif
4228}
4229EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
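
/*
 * Illustrative caller sketch (error handling trimmed): a driver filling
 * its backing store for some page index i, in the spirit of the i915
 * example mentioned above, might do
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *
 *	if (IS_ERR(page))
 *		... fall back to a smaller allocation or fail ...
 *	... use the page ...
 *	put_page(page);
 *
 * The page is returned uptodate and unlocked, with a reference held that
 * the caller must drop with put_page() when done.
 */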