1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/ramfs.h>
29#include <linux/pagemap.h>
30#include <linux/file.h>
31#include <linux/mm.h>
32#include <linux/random.h>
33#include <linux/sched/signal.h>
34#include <linux/export.h>
35#include <linux/swap.h>
36#include <linux/uio.h>
37#include <linux/khugepaged.h>
38#include <linux/hugetlb.h>
39#include <linux/frontswap.h>
40#include <linux/fs_parser.h>
41
42#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
43
44static struct vfsmount *shm_mnt;
45
46#ifdef CONFIG_SHMEM
47/*
48 * This virtual memory filesystem is heavily based on the ramfs. It
49 * extends ramfs by the ability to use swap and honor resource limits
50 * which makes it a completely usable filesystem.
51 */
52
53#include <linux/xattr.h>
54#include <linux/exportfs.h>
55#include <linux/posix_acl.h>
56#include <linux/posix_acl_xattr.h>
57#include <linux/mman.h>
58#include <linux/string.h>
59#include <linux/slab.h>
60#include <linux/backing-dev.h>
61#include <linux/shmem_fs.h>
62#include <linux/writeback.h>
63#include <linux/blkdev.h>
64#include <linux/pagevec.h>
65#include <linux/percpu_counter.h>
66#include <linux/falloc.h>
67#include <linux/splice.h>
68#include <linux/security.h>
69#include <linux/swapops.h>
70#include <linux/mempolicy.h>
71#include <linux/namei.h>
72#include <linux/ctype.h>
73#include <linux/migrate.h>
74#include <linux/highmem.h>
75#include <linux/seq_file.h>
76#include <linux/magic.h>
77#include <linux/syscalls.h>
78#include <linux/fcntl.h>
79#include <uapi/linux/memfd.h>
80#include <linux/userfaultfd_k.h>
81#include <linux/rmap.h>
82#include <linux/uuid.h>
83
84#include <linux/uaccess.h>
85
86#include "internal.h"
87
88#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
89#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
90
91/* Pretend that each entry is of this size in directory's i_size */
92#define BOGO_DIRENT_SIZE 20
93
94/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
95#define SHORT_SYMLINK_LEN 128
96
97/*
98 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99 * inode->i_private (with i_mutex making sure that it has only one user at
100 * a time): we would prefer not to enlarge the shmem inode just for that.
101 */
102struct shmem_falloc {
103 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
104 pgoff_t start; /* start of range currently being fallocated */
105 pgoff_t next; /* the next page offset to be fallocated */
106 pgoff_t nr_falloced; /* how many new pages have been fallocated */
107 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
108};
109
110struct shmem_options {
111 unsigned long long blocks;
112 unsigned long long inodes;
113 struct mempolicy *mpol;
114 kuid_t uid;
115 kgid_t gid;
116 umode_t mode;
117 bool full_inums;
118 int huge;
119 int seen;
120#define SHMEM_SEEN_BLOCKS 1
121#define SHMEM_SEEN_INODES 2
122#define SHMEM_SEEN_HUGE 4
123#define SHMEM_SEEN_INUMS 8
124};
125
126#ifdef CONFIG_TMPFS
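/*
 * Mount defaults when no size= or nr_inodes= option is given:
 * allow up to half of RAM for blocks, and cap the number of inodes
 * by lowmem as well as by half of RAM.
 */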
127static unsigned long shmem_default_max_blocks(void)
128{
129 return totalram_pages() / 2;
130}
131
132static unsigned long shmem_default_max_inodes(void)
133{
134 unsigned long nr_pages = totalram_pages();
135
136 return min(nr_pages - totalhigh_pages(), nr_pages / 2);
137}
138#endif
139
140static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141static int shmem_replace_page(struct page **pagep, gfp_t gfp,
142 struct shmem_inode_info *info, pgoff_t index);
143static int shmem_swapin_page(struct inode *inode, pgoff_t index,
144 struct page **pagep, enum sgp_type sgp,
145 gfp_t gfp, struct vm_area_struct *vma,
146 vm_fault_t *fault_type);
147static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
148 struct page **pagep, enum sgp_type sgp,
149 gfp_t gfp, struct vm_area_struct *vma,
150 struct vm_fault *vmf, vm_fault_t *fault_type);
151
152int shmem_getpage(struct inode *inode, pgoff_t index,
153 struct page **pagep, enum sgp_type sgp)
154{
155 return shmem_getpage_gfp(inode, index, pagep, sgp,
156 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
157}
158
159static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
160{
161 return sb->s_fs_info;
162}
163
164/*
165 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
166 * for shared memory and for shared anonymous (/dev/zero) mappings
167 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
168 * consistent with the pre-accounting of private mappings ...
169 */
170static inline int shmem_acct_size(unsigned long flags, loff_t size)
171{
172 return (flags & VM_NORESERVE) ?
173 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
174}
175
176static inline void shmem_unacct_size(unsigned long flags, loff_t size)
177{
178 if (!(flags & VM_NORESERVE))
179 vm_unacct_memory(VM_ACCT(size));
180}
181
182static inline int shmem_reacct_size(unsigned long flags,
183 loff_t oldsize, loff_t newsize)
184{
185 if (!(flags & VM_NORESERVE)) {
186 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
187 return security_vm_enough_memory_mm(current->mm,
188 VM_ACCT(newsize) - VM_ACCT(oldsize));
189 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
190 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
191 }
192 return 0;
193}
194
195/*
196 * ... whereas tmpfs objects are accounted incrementally as
197 * pages are allocated, in order to allow large sparse files.
198 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
199 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
200 */
201static inline int shmem_acct_block(unsigned long flags, long pages)
202{
203 if (!(flags & VM_NORESERVE))
204 return 0;
205
206 return security_vm_enough_memory_mm(current->mm,
207 pages * VM_ACCT(PAGE_SIZE));
208}
209
210static inline void shmem_unacct_blocks(unsigned long flags, long pages)
211{
212 if (flags & VM_NORESERVE)
213 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
214}
215
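/*
 * Charge pages newly allocated for this inode: per-page overcommit
 * accounting for objects not pre-accounted at setup (VM_NORESERVE),
 * plus the per-superblock used_blocks limit when max_blocks is set.
 * Returns false, with nothing left accounted, if either check fails.
 */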
216static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
217{
218 struct shmem_inode_info *info = SHMEM_I(inode);
219 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
220
221 if (shmem_acct_block(info->flags, pages))
222 return false;
223
224 if (sbinfo->max_blocks) {
225 if (percpu_counter_compare(&sbinfo->used_blocks,
226 sbinfo->max_blocks - pages) > 0)
227 goto unacct;
228 percpu_counter_add(&sbinfo->used_blocks, pages);
229 }
230
231 return true;
232
233unacct:
234 shmem_unacct_blocks(info->flags, pages);
235 return false;
236}
237
238static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
239{
240 struct shmem_inode_info *info = SHMEM_I(inode);
241 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
242
243 if (sbinfo->max_blocks)
244 percpu_counter_sub(&sbinfo->used_blocks, pages);
245 shmem_unacct_blocks(info->flags, pages);
246}
247
248static const struct super_operations shmem_ops;
249static const struct address_space_operations shmem_aops;
250static const struct file_operations shmem_file_operations;
251static const struct inode_operations shmem_inode_operations;
252static const struct inode_operations shmem_dir_inode_operations;
253static const struct inode_operations shmem_special_inode_operations;
254static const struct vm_operations_struct shmem_vm_ops;
255static struct file_system_type shmem_fs_type;
256
257bool vma_is_shmem(struct vm_area_struct *vma)
258{
259 return vma->vm_ops == &shmem_vm_ops;
260}
261
262static LIST_HEAD(shmem_swaplist);
263static DEFINE_MUTEX(shmem_swaplist_mutex);
264
265/*
266 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
267 * produces a novel ino for the newly allocated inode.
268 *
269 * It may also be called when making a hard link to permit the space needed by
270 * each dentry. However, in that case, no new inode number is needed since that
271 * internally draws from another pool of inode numbers (currently global
272 * get_next_ino()). This case is indicated by passing NULL as inop.
273 */
274#define SHMEM_INO_BATCH 1024
275static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
276{
277 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
278 ino_t ino;
279
280 if (!(sb->s_flags & SB_KERNMOUNT)) {
281 spin_lock(&sbinfo->stat_lock);
282 if (sbinfo->max_inodes) {
283 if (!sbinfo->free_inodes) {
284 spin_unlock(&sbinfo->stat_lock);
285 return -ENOSPC;
286 }
287 sbinfo->free_inodes--;
288 }
289 if (inop) {
290 ino = sbinfo->next_ino++;
291 if (unlikely(is_zero_ino(ino)))
292 ino = sbinfo->next_ino++;
293 if (unlikely(!sbinfo->full_inums &&
294 ino > UINT_MAX)) {
295 /*
296 * Emulate get_next_ino uint wraparound for
297 * compatibility
298 */
299 if (IS_ENABLED(CONFIG_64BIT))
300 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
301 __func__, MINOR(sb->s_dev));
302 sbinfo->next_ino = 1;
303 ino = sbinfo->next_ino++;
304 }
305 *inop = ino;
306 }
307 spin_unlock(&sbinfo->stat_lock);
308 } else if (inop) {
309 /*
310 * __shmem_file_setup, one of our callers, is lock-free: it
311 * doesn't hold stat_lock in shmem_reserve_inode since
312 * max_inodes is always 0, and is called from potentially
313 * unknown contexts. As such, use a per-cpu batched allocator
314 * which doesn't require the per-sb stat_lock unless we are at
315 * the batch boundary.
316 *
317 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
318 * shmem mounts are not exposed to userspace, so we don't need
319 * to worry about things like glibc compatibility.
320 */
321 ino_t *next_ino;
322 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
323 ino = *next_ino;
324 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
325 spin_lock(&sbinfo->stat_lock);
326 ino = sbinfo->next_ino;
327 sbinfo->next_ino += SHMEM_INO_BATCH;
328 spin_unlock(&sbinfo->stat_lock);
329 if (unlikely(is_zero_ino(ino)))
330 ino++;
331 }
332 *inop = ino;
333 *next_ino = ++ino;
334 put_cpu();
335 }
336
337 return 0;
338}
339
340static void shmem_free_inode(struct super_block *sb)
341{
342 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
343 if (sbinfo->max_inodes) {
344 spin_lock(&sbinfo->stat_lock);
345 sbinfo->free_inodes++;
346 spin_unlock(&sbinfo->stat_lock);
347 }
348}
349
350/**
351 * shmem_recalc_inode - recalculate the block usage of an inode
352 * @inode: inode to recalc
353 *
354 * We have to calculate the free blocks since the mm can drop
355 * undirtied hole pages behind our back.
356 *
357 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
358 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
359 *
360 * It has to be called with the spinlock held.
361 */
362static void shmem_recalc_inode(struct inode *inode)
363{
364 struct shmem_inode_info *info = SHMEM_I(inode);
365 long freed;
366
367 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
368 if (freed > 0) {
369 info->alloced -= freed;
370 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
371 shmem_inode_unacct_blocks(inode, freed);
372 }
373}
374
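/*
 * shmem_charge() and shmem_uncharge() adjust the inode's accounting for
 * pages added to or removed from its page cache by code outside shmem.c
 * (such as the huge page collapse code).
 */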
375bool shmem_charge(struct inode *inode, long pages)
376{
377 struct shmem_inode_info *info = SHMEM_I(inode);
378 unsigned long flags;
379
380 if (!shmem_inode_acct_block(inode, pages))
381 return false;
382
383 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
384 inode->i_mapping->nrpages += pages;
385
386 spin_lock_irqsave(&info->lock, flags);
387 info->alloced += pages;
388 inode->i_blocks += pages * BLOCKS_PER_PAGE;
389 shmem_recalc_inode(inode);
390 spin_unlock_irqrestore(&info->lock, flags);
391
392 return true;
393}
394
395void shmem_uncharge(struct inode *inode, long pages)
396{
397 struct shmem_inode_info *info = SHMEM_I(inode);
398 unsigned long flags;
399
400 /* nrpages adjustment done by __delete_from_page_cache() or caller */
401
402 spin_lock_irqsave(&info->lock, flags);
403 info->alloced -= pages;
404 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
405 shmem_recalc_inode(inode);
406 spin_unlock_irqrestore(&info->lock, flags);
407
408 shmem_inode_unacct_blocks(inode, pages);
409}
410
411/*
412 * Replace item expected in xarray by a new item, while holding xa_lock.
413 */
414static int shmem_replace_entry(struct address_space *mapping,
415 pgoff_t index, void *expected, void *replacement)
416{
417 XA_STATE(xas, &mapping->i_pages, index);
418 void *item;
419
420 VM_BUG_ON(!expected);
421 VM_BUG_ON(!replacement);
422 item = xas_load(&xas);
423 if (item != expected)
424 return -ENOENT;
425 xas_store(&xas, replacement);
426 return 0;
427}
428
429/*
430 * Sometimes, before we decide whether to proceed or to fail, we must check
431 * that an entry was not already brought back from swap by a racing thread.
432 *
433 * Checking page is not enough: by the time a SwapCache page is locked, it
434 * might be reused, and again be SwapCache, using the same swap as before.
435 */
436static bool shmem_confirm_swap(struct address_space *mapping,
437 pgoff_t index, swp_entry_t swap)
438{
439 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
440}
441
442/*
443 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
444 *
445 * SHMEM_HUGE_NEVER:
446 * disables huge pages for the mount;
447 * SHMEM_HUGE_ALWAYS:
448 * enables huge pages for the mount;
449 * SHMEM_HUGE_WITHIN_SIZE:
450 * only allocate huge pages if the page will be fully within i_size,
451 * also respect fadvise()/madvise() hints;
452 * SHMEM_HUGE_ADVISE:
453 * only allocate huge pages if requested with fadvise()/madvise();
454 */
455
456#define SHMEM_HUGE_NEVER 0
457#define SHMEM_HUGE_ALWAYS 1
458#define SHMEM_HUGE_WITHIN_SIZE 2
459#define SHMEM_HUGE_ADVISE 3
460
461/*
462 * Special values.
463 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
464 *
465 * SHMEM_HUGE_DENY:
466 * disables huge on shm_mnt and all mounts, for emergency use;
467 * SHMEM_HUGE_FORCE:
468 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
469 *
470 */
471#define SHMEM_HUGE_DENY (-1)
472#define SHMEM_HUGE_FORCE (-2)
473
474#ifdef CONFIG_TRANSPARENT_HUGEPAGE
475/* ifdef here to avoid bloating shmem.o when not necessary */
476
477static int shmem_huge __read_mostly;
478
479#if defined(CONFIG_SYSFS)
480static int shmem_parse_huge(const char *str)
481{
482 if (!strcmp(str, "never"))
483 return SHMEM_HUGE_NEVER;
484 if (!strcmp(str, "always"))
485 return SHMEM_HUGE_ALWAYS;
486 if (!strcmp(str, "within_size"))
487 return SHMEM_HUGE_WITHIN_SIZE;
488 if (!strcmp(str, "advise"))
489 return SHMEM_HUGE_ADVISE;
490 if (!strcmp(str, "deny"))
491 return SHMEM_HUGE_DENY;
492 if (!strcmp(str, "force"))
493 return SHMEM_HUGE_FORCE;
494 return -EINVAL;
495}
496#endif
497
498#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
499static const char *shmem_format_huge(int huge)
500{
501 switch (huge) {
502 case SHMEM_HUGE_NEVER:
503 return "never";
504 case SHMEM_HUGE_ALWAYS:
505 return "always";
506 case SHMEM_HUGE_WITHIN_SIZE:
507 return "within_size";
508 case SHMEM_HUGE_ADVISE:
509 return "advise";
510 case SHMEM_HUGE_DENY:
511 return "deny";
512 case SHMEM_HUGE_FORCE:
513 return "force";
514 default:
515 VM_BUG_ON(1);
516 return "bad_val";
517 }
518}
519#endif
520
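/*
 * Scan sbinfo->shrinklist for inodes whose huge pages extend beyond
 * i_size, and try to split those pages so the excess can be freed.
 * Returns the number of pages split (limited by nr_to_split when that
 * is non-zero), or SHRINK_STOP if the list is empty.
 */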
521static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
522 struct shrink_control *sc, unsigned long nr_to_split)
523{
524 LIST_HEAD(list), *pos, *next;
525 LIST_HEAD(to_remove);
526 struct inode *inode;
527 struct shmem_inode_info *info;
528 struct page *page;
529 unsigned long batch = sc ? sc->nr_to_scan : 128;
530 int removed = 0, split = 0;
531
532 if (list_empty(&sbinfo->shrinklist))
533 return SHRINK_STOP;
534
535 spin_lock(&sbinfo->shrinklist_lock);
536 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
537 info = list_entry(pos, struct shmem_inode_info, shrinklist);
538
539 /* pin the inode */
540 inode = igrab(&info->vfs_inode);
541
542 /* inode is about to be evicted */
543 if (!inode) {
544 list_del_init(&info->shrinklist);
545 removed++;
546 goto next;
547 }
548
549 /* Check if there's anything to gain */
550 if (round_up(inode->i_size, PAGE_SIZE) ==
551 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
552 list_move(&info->shrinklist, &to_remove);
553 removed++;
554 goto next;
555 }
556
557 list_move(&info->shrinklist, &list);
558next:
559 if (!--batch)
560 break;
561 }
562 spin_unlock(&sbinfo->shrinklist_lock);
563
564 list_for_each_safe(pos, next, &to_remove) {
565 info = list_entry(pos, struct shmem_inode_info, shrinklist);
566 inode = &info->vfs_inode;
567 list_del_init(&info->shrinklist);
568 iput(inode);
569 }
570
571 list_for_each_safe(pos, next, &list) {
572 int ret;
573
574 info = list_entry(pos, struct shmem_inode_info, shrinklist);
575 inode = &info->vfs_inode;
576
577 if (nr_to_split && split >= nr_to_split)
578 goto leave;
579
580 page = find_get_page(inode->i_mapping,
581 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
582 if (!page)
583 goto drop;
584
585 /* No huge page at the end of the file: nothing to split */
586 if (!PageTransHuge(page)) {
587 put_page(page);
588 goto drop;
589 }
590
591 /*
592 * Leave the inode on the list if we failed to lock
593 * the page at this time.
594 *
595 * Waiting for the lock may lead to deadlock in the
596 * reclaim path.
597 */
598 if (!trylock_page(page)) {
599 put_page(page);
600 goto leave;
601 }
602
603 ret = split_huge_page(page);
604 unlock_page(page);
605 put_page(page);
606
607 /* If split failed leave the inode on the list */
608 if (ret)
609 goto leave;
610
611 split++;
612drop:
613 list_del_init(&info->shrinklist);
614 removed++;
615leave:
616 iput(inode);
617 }
618
619 spin_lock(&sbinfo->shrinklist_lock);
620 list_splice_tail(&list, &sbinfo->shrinklist);
621 sbinfo->shrinklist_len -= removed;
622 spin_unlock(&sbinfo->shrinklist_lock);
623
624 return split;
625}
626
627static long shmem_unused_huge_scan(struct super_block *sb,
628 struct shrink_control *sc)
629{
630 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
631
632 if (!READ_ONCE(sbinfo->shrinklist_len))
633 return SHRINK_STOP;
634
635 return shmem_unused_huge_shrink(sbinfo, sc, 0);
636}
637
638static long shmem_unused_huge_count(struct super_block *sb,
639 struct shrink_control *sc)
640{
641 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
642 return READ_ONCE(sbinfo->shrinklist_len);
643}
644#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
645
646#define shmem_huge SHMEM_HUGE_DENY
647
648static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
649 struct shrink_control *sc, unsigned long nr_to_split)
650{
651 return 0;
652}
653#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
654
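/*
 * True if this mount may use huge pages: THP must be configured, huge
 * must be enabled on the mount or forced globally, and not globally denied.
 */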
655static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
656{
657 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
658 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
659 shmem_huge != SHMEM_HUGE_DENY)
660 return true;
661 return false;
662}
663
664/*
665 * Like add_to_page_cache_locked, but error if expected item has gone.
666 */
667static int shmem_add_to_page_cache(struct page *page,
668 struct address_space *mapping,
669 pgoff_t index, void *expected, gfp_t gfp,
670 struct mm_struct *charge_mm)
671{
672 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
673 unsigned long i = 0;
674 unsigned long nr = compound_nr(page);
675 int error;
676
677 VM_BUG_ON_PAGE(PageTail(page), page);
678 VM_BUG_ON_PAGE(index != round_down(index, nr), page);
679 VM_BUG_ON_PAGE(!PageLocked(page), page);
680 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
681 VM_BUG_ON(expected && PageTransHuge(page));
682
683 page_ref_add(page, nr);
684 page->mapping = mapping;
685 page->index = index;
686
687 if (!PageSwapCache(page)) {
688 error = mem_cgroup_charge(page, charge_mm, gfp);
689 if (error) {
690 if (PageTransHuge(page)) {
691 count_vm_event(THP_FILE_FALLBACK);
692 count_vm_event(THP_FILE_FALLBACK_CHARGE);
693 }
694 goto error;
695 }
696 }
697 cgroup_throttle_swaprate(page, gfp);
698
699 do {
700 void *entry;
701 xas_lock_irq(&xas);
702 entry = xas_find_conflict(&xas);
703 if (entry != expected)
704 xas_set_err(&xas, -EEXIST);
705 xas_create_range(&xas);
706 if (xas_error(&xas))
707 goto unlock;
708next:
709 xas_store(&xas, page);
710 if (++i < nr) {
711 xas_next(&xas);
712 goto next;
713 }
714 if (PageTransHuge(page)) {
715 count_vm_event(THP_FILE_ALLOC);
716 __inc_node_page_state(page, NR_SHMEM_THPS);
717 }
718 mapping->nrpages += nr;
719 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
720 __mod_lruvec_page_state(page, NR_SHMEM, nr);
721unlock:
722 xas_unlock_irq(&xas);
723 } while (xas_nomem(&xas, gfp));
724
725 if (xas_error(&xas)) {
726 error = xas_error(&xas);
727 goto error;
728 }
729
730 return 0;
731error:
732 page->mapping = NULL;
733 page_ref_sub(page, nr);
734 return error;
735}
736
737/*
738 * Like delete_from_page_cache, but substitutes swap for page.
739 */
740static void shmem_delete_from_page_cache(struct page *page, void *radswap)
741{
742 struct address_space *mapping = page->mapping;
743 int error;
744
745 VM_BUG_ON_PAGE(PageCompound(page), page);
746
747 xa_lock_irq(&mapping->i_pages);
748 error = shmem_replace_entry(mapping, page->index, page, radswap);
749 page->mapping = NULL;
750 mapping->nrpages--;
751 __dec_lruvec_page_state(page, NR_FILE_PAGES);
752 __dec_lruvec_page_state(page, NR_SHMEM);
753 xa_unlock_irq(&mapping->i_pages);
754 put_page(page);
755 BUG_ON(error);
756}
757
758/*
759 * Remove swap entry from page cache, free the swap and its page cache.
760 */
761static int shmem_free_swap(struct address_space *mapping,
762 pgoff_t index, void *radswap)
763{
764 void *old;
765
766 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
767 if (old != radswap)
768 return -ENOENT;
769 free_swap_and_cache(radix_to_swp_entry(radswap));
770 return 0;
771}
772
773/*
774 * Determine (in bytes) how many of the shmem object's pages mapped by the
775 * given offsets are swapped out.
776 *
777 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
778 * as long as the inode doesn't go away and racy results are not a problem.
779 */
780unsigned long shmem_partial_swap_usage(struct address_space *mapping,
781 pgoff_t start, pgoff_t end)
782{
783 XA_STATE(xas, &mapping->i_pages, start);
784 struct page *page;
785 unsigned long swapped = 0;
786
787 rcu_read_lock();
788 xas_for_each(&xas, page, end - 1) {
789 if (xas_retry(&xas, page))
790 continue;
791 if (xa_is_value(page))
792 swapped++;
793
794 if (need_resched()) {
795 xas_pause(&xas);
796 cond_resched_rcu();
797 }
798 }
799
800 rcu_read_unlock();
801
802 return swapped << PAGE_SHIFT;
803}
804
805/*
806 * Determine (in bytes) how many of the shmem object's pages mapped by the
807 * given vma is swapped out.
808 *
809 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
810 * as long as the inode doesn't go away and racy results are not a problem.
811 */
812unsigned long shmem_swap_usage(struct vm_area_struct *vma)
813{
814 struct inode *inode = file_inode(vma->vm_file);
815 struct shmem_inode_info *info = SHMEM_I(inode);
816 struct address_space *mapping = inode->i_mapping;
817 unsigned long swapped;
818
819 /* Be careful as we don't hold info->lock */
820 swapped = READ_ONCE(info->swapped);
821
822 /*
823 * The easier cases are when the shmem object has nothing in swap, or
824 * the vma maps it whole. Then we can simply use the stats that we
825 * already track.
826 */
827 if (!swapped)
828 return 0;
829
830 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
831 return swapped << PAGE_SHIFT;
832
833 /* Here comes the more involved part */
834 return shmem_partial_swap_usage(mapping,
835 linear_page_index(vma, vma->vm_start),
836 linear_page_index(vma, vma->vm_end));
837}
838
839/*
840 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
841 */
842void shmem_unlock_mapping(struct address_space *mapping)
843{
844 struct pagevec pvec;
845 pgoff_t indices[PAGEVEC_SIZE];
846 pgoff_t index = 0;
847
848 pagevec_init(&pvec);
849 /*
850 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
851 */
852 while (!mapping_unevictable(mapping)) {
853 /*
854 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
855 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
856 */
857 pvec.nr = find_get_entries(mapping, index,
858 PAGEVEC_SIZE, pvec.pages, indices);
859 if (!pvec.nr)
860 break;
861 index = indices[pvec.nr - 1] + 1;
862 pagevec_remove_exceptionals(&pvec);
863 check_move_unevictable_pages(&pvec);
864 pagevec_release(&pvec);
865 cond_resched();
866 }
867}
868
869/*
870 * Check whether a hole-punch or truncation needs to split a huge page,
871 * returning true if no split was required, or the split has been successful.
872 *
873 * Eviction (or truncation to 0 size) should never need to split a huge page;
874 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
875 * head, and then succeeded to trylock on tail.
876 *
877 * A split can only succeed when there are no additional references on the
878 * huge page: so the split below relies upon find_get_entries() having stopped
879 * when it found a subpage of the huge page, without getting further references.
880 */
881static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
882{
883 if (!PageTransCompound(page))
884 return true;
885
886 /* Just proceed to delete a huge page wholly within the range punched */
887 if (PageHead(page) &&
888 page->index >= start && page->index + HPAGE_PMD_NR <= end)
889 return true;
890
891 /* Try to split huge page, so we can truly punch the hole or truncate */
892 return split_huge_page(page) >= 0;
893}
894
895/*
896 * Remove range of pages and swap entries from page cache, and free them.
897 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
898 */
899static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
900 bool unfalloc)
901{
902 struct address_space *mapping = inode->i_mapping;
903 struct shmem_inode_info *info = SHMEM_I(inode);
904 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
905 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
906 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
907 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
908 struct pagevec pvec;
909 pgoff_t indices[PAGEVEC_SIZE];
910 long nr_swaps_freed = 0;
911 pgoff_t index;
912 int i;
913
914 if (lend == -1)
915 end = -1; /* unsigned, so actually very big */
916
917 pagevec_init(&pvec);
918 index = start;
919 while (index < end) {
920 pvec.nr = find_get_entries(mapping, index,
921 min(end - index, (pgoff_t)PAGEVEC_SIZE),
922 pvec.pages, indices);
923 if (!pvec.nr)
924 break;
925 for (i = 0; i < pagevec_count(&pvec); i++) {
926 struct page *page = pvec.pages[i];
927
928 index = indices[i];
929 if (index >= end)
930 break;
931
932 if (xa_is_value(page)) {
933 if (unfalloc)
934 continue;
935 nr_swaps_freed += !shmem_free_swap(mapping,
936 index, page);
937 continue;
938 }
939
940 VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
941
942 if (!trylock_page(page))
943 continue;
944
945 if ((!unfalloc || !PageUptodate(page)) &&
946 page_mapping(page) == mapping) {
947 VM_BUG_ON_PAGE(PageWriteback(page), page);
948 if (shmem_punch_compound(page, start, end))
949 truncate_inode_page(mapping, page);
950 }
951 unlock_page(page);
952 }
953 pagevec_remove_exceptionals(&pvec);
954 pagevec_release(&pvec);
955 cond_resched();
956 index++;
957 }
958
959 if (partial_start) {
960 struct page *page = NULL;
961 shmem_getpage(inode, start - 1, &page, SGP_READ);
962 if (page) {
963 unsigned int top = PAGE_SIZE;
964 if (start > end) {
965 top = partial_end;
966 partial_end = 0;
967 }
968 zero_user_segment(page, partial_start, top);
969 set_page_dirty(page);
970 unlock_page(page);
971 put_page(page);
972 }
973 }
974 if (partial_end) {
975 struct page *page = NULL;
976 shmem_getpage(inode, end, &page, SGP_READ);
977 if (page) {
978 zero_user_segment(page, 0, partial_end);
979 set_page_dirty(page);
980 unlock_page(page);
981 put_page(page);
982 }
983 }
984 if (start >= end)
985 return;
986
987 index = start;
988 while (index < end) {
989 cond_resched();
990
991 pvec.nr = find_get_entries(mapping, index,
992 min(end - index, (pgoff_t)PAGEVEC_SIZE),
993 pvec.pages, indices);
994 if (!pvec.nr) {
995 /* If all gone or hole-punch or unfalloc, we're done */
996 if (index == start || end != -1)
997 break;
998 /* But if truncating, restart to make sure all gone */
999 index = start;
1000 continue;
1001 }
1002 for (i = 0; i < pagevec_count(&pvec); i++) {
1003 struct page *page = pvec.pages[i];
1004
1005 index = indices[i];
1006 if (index >= end)
1007 break;
1008
1009 if (xa_is_value(page)) {
1010 if (unfalloc)
1011 continue;
1012 if (shmem_free_swap(mapping, index, page)) {
1013 /* Swap was replaced by page: retry */
1014 index--;
1015 break;
1016 }
1017 nr_swaps_freed++;
1018 continue;
1019 }
1020
1021 lock_page(page);
1022
1023 if (!unfalloc || !PageUptodate(page)) {
1024 if (page_mapping(page) != mapping) {
1025 /* Page was replaced by swap: retry */
1026 unlock_page(page);
1027 index--;
1028 break;
1029 }
1030 VM_BUG_ON_PAGE(PageWriteback(page), page);
1031 if (shmem_punch_compound(page, start, end))
1032 truncate_inode_page(mapping, page);
1033 else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1034 /* Wipe the page and don't get stuck */
1035 clear_highpage(page);
1036 flush_dcache_page(page);
1037 set_page_dirty(page);
1038 if (index <
1039 round_up(start, HPAGE_PMD_NR))
1040 start = index + 1;
1041 }
1042 }
1043 unlock_page(page);
1044 }
1045 pagevec_remove_exceptionals(&pvec);
1046 pagevec_release(&pvec);
1047 index++;
1048 }
1049
1050 spin_lock_irq(&info->lock);
1051 info->swapped -= nr_swaps_freed;
1052 shmem_recalc_inode(inode);
1053 spin_unlock_irq(&info->lock);
1054}
1055
1056void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1057{
1058 shmem_undo_range(inode, lstart, lend, false);
1059 inode->i_ctime = inode->i_mtime = current_time(inode);
1060}
1061EXPORT_SYMBOL_GPL(shmem_truncate_range);
1062
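/*
 * ->getattr: bring the block counts up to date in case the mm freed
 * pages behind our back, and report the huge page size as the blocksize
 * on huge-enabled mounts.
 */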
1063static int shmem_getattr(const struct path *path, struct kstat *stat,
1064 u32 request_mask, unsigned int query_flags)
1065{
1066 struct inode *inode = path->dentry->d_inode;
1067 struct shmem_inode_info *info = SHMEM_I(inode);
1068 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1069
1070 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1071 spin_lock_irq(&info->lock);
1072 shmem_recalc_inode(inode);
1073 spin_unlock_irq(&info->lock);
1074 }
1075 generic_fillattr(inode, stat);
1076
1077 if (is_huge_enabled(sb_info))
1078 stat->blksize = HPAGE_PMD_SIZE;
1079
1080 return 0;
1081}
1082
1083static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1084{
1085 struct inode *inode = d_inode(dentry);
1086 struct shmem_inode_info *info = SHMEM_I(inode);
1087 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1088 int error;
1089
1090 error = setattr_prepare(dentry, attr);
1091 if (error)
1092 return error;
1093
1094 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1095 loff_t oldsize = inode->i_size;
1096 loff_t newsize = attr->ia_size;
1097
1098 /* protected by i_mutex */
1099 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1100 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1101 return -EPERM;
1102
1103 if (newsize != oldsize) {
1104 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1105 oldsize, newsize);
1106 if (error)
1107 return error;
1108 i_size_write(inode, newsize);
1109 inode->i_ctime = inode->i_mtime = current_time(inode);
1110 }
1111 if (newsize <= oldsize) {
1112 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1113 if (oldsize > holebegin)
1114 unmap_mapping_range(inode->i_mapping,
1115 holebegin, 0, 1);
1116 if (info->alloced)
1117 shmem_truncate_range(inode,
1118 newsize, (loff_t)-1);
1119 /* unmap again to remove racily COWed private pages */
1120 if (oldsize > holebegin)
1121 unmap_mapping_range(inode->i_mapping,
1122 holebegin, 0, 1);
1123
1124 /*
1125 * Part of the huge page can be beyond i_size: subject
1126 * to shrink under memory pressure.
1127 */
1128 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1129 spin_lock(&sbinfo->shrinklist_lock);
1130 /*
1131 * _careful to defend against unlocked access to
1132 * ->shrink_list in shmem_unused_huge_shrink()
1133 */
1134 if (list_empty_careful(&info->shrinklist)) {
1135 list_add_tail(&info->shrinklist,
1136 &sbinfo->shrinklist);
1137 sbinfo->shrinklist_len++;
1138 }
1139 spin_unlock(&sbinfo->shrinklist_lock);
1140 }
1141 }
1142 }
1143
1144 setattr_copy(inode, attr);
1145 if (attr->ia_valid & ATTR_MODE)
1146 error = posix_acl_chmod(inode, inode->i_mode);
1147 return error;
1148}
1149
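/*
 * Final teardown of a shmem inode: drop the size accounting, truncate
 * all of its pages, take it off the huge-page shrinklist and (after
 * waiting out any shmem_unuse() scan) off the swaplist, then free its
 * xattrs and give back the reserved inode.
 */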
1150static void shmem_evict_inode(struct inode *inode)
1151{
1152 struct shmem_inode_info *info = SHMEM_I(inode);
1153 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1154
1155 if (inode->i_mapping->a_ops == &shmem_aops) {
1156 shmem_unacct_size(info->flags, inode->i_size);
1157 inode->i_size = 0;
1158 shmem_truncate_range(inode, 0, (loff_t)-1);
1159 if (!list_empty(&info->shrinklist)) {
1160 spin_lock(&sbinfo->shrinklist_lock);
1161 if (!list_empty(&info->shrinklist)) {
1162 list_del_init(&info->shrinklist);
1163 sbinfo->shrinklist_len--;
1164 }
1165 spin_unlock(&sbinfo->shrinklist_lock);
1166 }
1167 while (!list_empty(&info->swaplist)) {
1168 /* Wait while shmem_unuse() is scanning this inode... */
1169 wait_var_event(&info->stop_eviction,
1170 !atomic_read(&info->stop_eviction));
1171 mutex_lock(&shmem_swaplist_mutex);
1172 /* ...but beware of the race if we peeked too early */
1173 if (!atomic_read(&info->stop_eviction))
1174 list_del_init(&info->swaplist);
1175 mutex_unlock(&shmem_swaplist_mutex);
1176 }
1177 }
1178
1179 simple_xattrs_free(&info->xattrs);
1180 WARN_ON(inode->i_blocks);
1181 shmem_free_inode(inode->i_sb);
1182 clear_inode(inode);
1183}
1184
1185extern struct swap_info_struct *swap_info[];
1186
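/*
 * Walk the page cache from 'start', collecting up to nr_entries swap
 * entries of the given swap type (and, if frontswap, only entries
 * present in frontswap), filling entries[] and indices[].
 * Returns the number of entries found.
 */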
1187static int shmem_find_swap_entries(struct address_space *mapping,
1188 pgoff_t start, unsigned int nr_entries,
1189 struct page **entries, pgoff_t *indices,
1190 unsigned int type, bool frontswap)
1191{
1192 XA_STATE(xas, &mapping->i_pages, start);
1193 struct page *page;
1194 swp_entry_t entry;
1195 unsigned int ret = 0;
1196
1197 if (!nr_entries)
1198 return 0;
1199
1200 rcu_read_lock();
1201 xas_for_each(&xas, page, ULONG_MAX) {
1202 if (xas_retry(&xas, page))
1203 continue;
1204
1205 if (!xa_is_value(page))
1206 continue;
1207
1208 entry = radix_to_swp_entry(page);
1209 if (swp_type(entry) != type)
1210 continue;
1211 if (frontswap &&
1212 !frontswap_test(swap_info[type], swp_offset(entry)))
1213 continue;
1214
1215 indices[ret] = xas.xa_index;
1216 entries[ret] = page;
1217
1218 if (need_resched()) {
1219 xas_pause(&xas);
1220 cond_resched_rcu();
1221 }
1222 if (++ret == nr_entries)
1223 break;
1224 }
1225 rcu_read_unlock();
1226
1227 return ret;
1228}
1229
1230/*
1231 * Move the swapped pages for an inode to page cache. Returns the count
1232 * of pages swapped in, or the error in case of failure.
1233 */
1234static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1235 pgoff_t *indices)
1236{
1237 int i = 0;
1238 int ret = 0;
1239 int error = 0;
1240 struct address_space *mapping = inode->i_mapping;
1241
1242 for (i = 0; i < pvec.nr; i++) {
1243 struct page *page = pvec.pages[i];
1244
1245 if (!xa_is_value(page))
1246 continue;
1247 error = shmem_swapin_page(inode, indices[i],
1248 &page, SGP_CACHE,
1249 mapping_gfp_mask(mapping),
1250 NULL, NULL);
1251 if (error == 0) {
1252 unlock_page(page);
1253 put_page(page);
1254 ret++;
1255 }
1256 if (error == -ENOMEM)
1257 break;
1258 error = 0;
1259 }
1260 return error ? error : ret;
1261}
1262
1263/*
1264 * If swap found in inode, free it and move page from swapcache to filecache.
1265 */
1266static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1267 bool frontswap, unsigned long *fs_pages_to_unuse)
1268{
1269 struct address_space *mapping = inode->i_mapping;
1270 pgoff_t start = 0;
1271 struct pagevec pvec;
1272 pgoff_t indices[PAGEVEC_SIZE];
1273 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1274 int ret = 0;
1275
1276 pagevec_init(&pvec);
1277 do {
1278 unsigned int nr_entries = PAGEVEC_SIZE;
1279
1280 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1281 nr_entries = *fs_pages_to_unuse;
1282
1283 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1284 pvec.pages, indices,
1285 type, frontswap);
1286 if (pvec.nr == 0) {
1287 ret = 0;
1288 break;
1289 }
1290
1291 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1292 if (ret < 0)
1293 break;
1294
1295 if (frontswap_partial) {
1296 *fs_pages_to_unuse -= ret;
1297 if (*fs_pages_to_unuse == 0) {
1298 ret = FRONTSWAP_PAGES_UNUSED;
1299 break;
1300 }
1301 }
1302
1303 start = indices[pvec.nr - 1];
1304 } while (true);
1305
1306 return ret;
1307}
1308
1309/*
1310 * Read all the shared memory data that resides in the swap
1311 * device 'type' back into memory, so the swap device can be
1312 * unused.
1313 */
1314int shmem_unuse(unsigned int type, bool frontswap,
1315 unsigned long *fs_pages_to_unuse)
1316{
1317 struct shmem_inode_info *info, *next;
1318 int error = 0;
1319
1320 if (list_empty(&shmem_swaplist))
1321 return 0;
1322
1323 mutex_lock(&shmem_swaplist_mutex);
1324 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1325 if (!info->swapped) {
1326 list_del_init(&info->swaplist);
1327 continue;
1328 }
1329 /*
1330 * Drop the swaplist mutex while searching the inode for swap;
1331 * but before doing so, make sure shmem_evict_inode() will not
1332 * remove placeholder inode from swaplist, nor let it be freed
1333 * (igrab() would protect from unlink, but not from unmount).
1334 */
1335 atomic_inc(&info->stop_eviction);
1336 mutex_unlock(&shmem_swaplist_mutex);
1337
1338 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1339 fs_pages_to_unuse);
1340 cond_resched();
1341
1342 mutex_lock(&shmem_swaplist_mutex);
1343 next = list_next_entry(info, swaplist);
1344 if (!info->swapped)
1345 list_del_init(&info->swaplist);
1346 if (atomic_dec_and_test(&info->stop_eviction))
1347 wake_up_var(&info->stop_eviction);
1348 if (error)
1349 break;
1350 }
1351 mutex_unlock(&shmem_swaplist_mutex);
1352
1353 return error;
1354}
1355
1356/*
1357 * Move the page from the page cache to the swap cache.
1358 */
1359static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1360{
1361 struct shmem_inode_info *info;
1362 struct address_space *mapping;
1363 struct inode *inode;
1364 swp_entry_t swap;
1365 pgoff_t index;
1366
1367 VM_BUG_ON_PAGE(PageCompound(page), page);
1368 BUG_ON(!PageLocked(page));
1369 mapping = page->mapping;
1370 index = page->index;
1371 inode = mapping->host;
1372 info = SHMEM_I(inode);
1373 if (info->flags & VM_LOCKED)
1374 goto redirty;
1375 if (!total_swap_pages)
1376 goto redirty;
1377
1378 /*
1379 * Our capabilities prevent regular writeback or sync from ever calling
1380 * shmem_writepage; but a stacking filesystem might use ->writepage of
1381 * its underlying filesystem, in which case tmpfs should write out to
1382 * swap only in response to memory pressure, and not for the writeback
1383 * threads or sync.
1384 */
1385 if (!wbc->for_reclaim) {
1386 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
1387 goto redirty;
1388 }
1389
1390 /*
1391 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1392 * value into swapfile.c, the only way we can correctly account for a
1393 * fallocated page arriving here is now to initialize it and write it.
1394 *
1395 * That's okay for a page already fallocated earlier, but if we have
1396 * not yet completed the fallocation, then (a) we want to keep track
1397 * of this page in case we have to undo it, and (b) it may not be a
1398 * good idea to continue anyway, once we're pushing into swap. So
1399 * reactivate the page, and let shmem_fallocate() quit when too many.
1400 */
1401 if (!PageUptodate(page)) {
1402 if (inode->i_private) {
1403 struct shmem_falloc *shmem_falloc;
1404 spin_lock(&inode->i_lock);
1405 shmem_falloc = inode->i_private;
1406 if (shmem_falloc &&
1407 !shmem_falloc->waitq &&
1408 index >= shmem_falloc->start &&
1409 index < shmem_falloc->next)
1410 shmem_falloc->nr_unswapped++;
1411 else
1412 shmem_falloc = NULL;
1413 spin_unlock(&inode->i_lock);
1414 if (shmem_falloc)
1415 goto redirty;
1416 }
1417 clear_highpage(page);
1418 flush_dcache_page(page);
1419 SetPageUptodate(page);
1420 }
1421
1422 swap = get_swap_page(page);
1423 if (!swap.val)
1424 goto redirty;
1425
1426 /*
1427 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1428 * if it's not already there. Do it now before the page is
1429 * moved to swap cache, when its pagelock no longer protects
1430 * the inode from eviction. But don't unlock the mutex until
1431 * we've incremented swapped, because shmem_unuse_inode() will
1432 * prune a !swapped inode from the swaplist under this mutex.
1433 */
1434 mutex_lock(&shmem_swaplist_mutex);
1435 if (list_empty(&info->swaplist))
1436 list_add(&info->swaplist, &shmem_swaplist);
1437
1438 if (add_to_swap_cache(page, swap,
1439 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1440 NULL) == 0) {
1441 spin_lock_irq(&info->lock);
1442 shmem_recalc_inode(inode);
1443 info->swapped++;
1444 spin_unlock_irq(&info->lock);
1445
1446 swap_shmem_alloc(swap);
1447 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1448
1449 mutex_unlock(&shmem_swaplist_mutex);
1450 BUG_ON(page_mapped(page));
1451 swap_writepage(page, wbc);
1452 return 0;
1453 }
1454
1455 mutex_unlock(&shmem_swaplist_mutex);
1456 put_swap_page(page, swap);
1457redirty:
1458 set_page_dirty(page);
1459 if (wbc->for_reclaim)
1460 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
1461 unlock_page(page);
1462 return 0;
1463}
1464
1465#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1466static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1467{
1468 char buffer[64];
1469
1470 if (!mpol || mpol->mode == MPOL_DEFAULT)
1471 return; /* show nothing */
1472
1473 mpol_to_str(buffer, sizeof(buffer), mpol);
1474
1475 seq_printf(seq, ",mpol=%s", buffer);
1476}
1477
1478static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1479{
1480 struct mempolicy *mpol = NULL;
1481 if (sbinfo->mpol) {
1482 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1483 mpol = sbinfo->mpol;
1484 mpol_get(mpol);
1485 spin_unlock(&sbinfo->stat_lock);
1486 }
1487 return mpol;
1488}
1489#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1490static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1491{
1492}
1493static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1494{
1495 return NULL;
1496}
1497#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1498#ifndef CONFIG_NUMA
1499#define vm_policy vm_private_data
1500#endif
1501
1502static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1503 struct shmem_inode_info *info, pgoff_t index)
1504{
1505 /* Create a pseudo vma that just contains the policy */
1506 vma_init(vma, NULL);
1507 /* Bias interleave by inode number to distribute better across nodes */
1508 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1509 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1510}
1511
1512static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1513{
1514 /* Drop reference taken by mpol_shared_policy_lookup() */
1515 mpol_cond_put(vma->vm_policy);
1516}
1517
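/*
 * Read a page back from swap, with readahead around it, using a pseudo
 * vma to supply the inode's NUMA memory policy.
 */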
1518static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1519 struct shmem_inode_info *info, pgoff_t index)
1520{
1521 struct vm_area_struct pvma;
1522 struct page *page;
1523 struct vm_fault vmf;
1524
1525 shmem_pseudo_vma_init(&pvma, info, index);
1526 vmf.vma = &pvma;
1527 vmf.address = 0;
1528 page = swap_cluster_readahead(swap, gfp, &vmf);
1529 shmem_pseudo_vma_destroy(&pvma);
1530
1531 return page;
1532}
1533
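/*
 * Allocate a huge page for the PMD-aligned range covering 'index',
 * unless something already exists in that range of the page cache.
 */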
1534static struct page *shmem_alloc_hugepage(gfp_t gfp,
1535 struct shmem_inode_info *info, pgoff_t index)
1536{
1537 struct vm_area_struct pvma;
1538 struct address_space *mapping = info->vfs_inode.i_mapping;
1539 pgoff_t hindex;
1540 struct page *page;
1541
1542 hindex = round_down(index, HPAGE_PMD_NR);
1543 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1544 XA_PRESENT))
1545 return NULL;
1546
1547 shmem_pseudo_vma_init(&pvma, info, hindex);
1548 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1549 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1550 shmem_pseudo_vma_destroy(&pvma);
1551 if (page)
1552 prep_transhuge_page(page);
1553 else
1554 count_vm_event(THP_FILE_FALLBACK);
1555 return page;
1556}
1557
1558static struct page *shmem_alloc_page(gfp_t gfp,
1559 struct shmem_inode_info *info, pgoff_t index)
1560{
1561 struct vm_area_struct pvma;
1562 struct page *page;
1563
1564 shmem_pseudo_vma_init(&pvma, info, index);
1565 page = alloc_page_vma(gfp, &pvma, 0);
1566 shmem_pseudo_vma_destroy(&pvma);
1567
1568 return page;
1569}
1570
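/*
 * Charge the block(s) for a new page, then allocate it (huge or small)
 * already locked and SwapBacked. Returns ERR_PTR(-ENOSPC) if the charge
 * fails, or ERR_PTR(-ENOMEM), with the charge undone, if the allocation
 * fails.
 */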
1571static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1572 struct inode *inode,
1573 pgoff_t index, bool huge)
1574{
1575 struct shmem_inode_info *info = SHMEM_I(inode);
1576 struct page *page;
1577 int nr;
1578 int err = -ENOSPC;
1579
1580 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1581 huge = false;
1582 nr = huge ? HPAGE_PMD_NR : 1;
1583
1584 if (!shmem_inode_acct_block(inode, nr))
1585 goto failed;
1586
1587 if (huge)
1588 page = shmem_alloc_hugepage(gfp, info, index);
1589 else
1590 page = shmem_alloc_page(gfp, info, index);
1591 if (page) {
1592 __SetPageLocked(page);
1593 __SetPageSwapBacked(page);
1594 return page;
1595 }
1596
1597 err = -ENOMEM;
1598 shmem_inode_unacct_blocks(inode, nr);
1599failed:
1600 return ERR_PTR(err);
1601}
1602
1603/*
1604 * When a page is moved from swapcache to shmem filecache (either by the
1605 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1606 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1607 * ignorance of the mapping it belongs to. If that mapping has special
1608 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1609 * we may need to copy to a suitable page before moving to filecache.
1610 *
1611 * In a future release, this may well be extended to respect cpuset and
1612 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1613 * but for now it is a simple matter of zone.
1614 */
1615static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1616{
1617 return page_zonenum(page) > gfp_zone(gfp);
1618}
1619
1620static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1621 struct shmem_inode_info *info, pgoff_t index)
1622{
1623 struct page *oldpage, *newpage;
1624 struct address_space *swap_mapping;
1625 swp_entry_t entry;
1626 pgoff_t swap_index;
1627 int error;
1628
1629 oldpage = *pagep;
1630 entry.val = page_private(oldpage);
1631 swap_index = swp_offset(entry);
1632 swap_mapping = page_mapping(oldpage);
1633
1634 /*
1635 * We have arrived here because our zones are constrained, so don't
1636 * limit chance of success by further cpuset and node constraints.
1637 */
1638 gfp &= ~GFP_CONSTRAINT_MASK;
1639 newpage = shmem_alloc_page(gfp, info, index);
1640 if (!newpage)
1641 return -ENOMEM;
1642
1643 get_page(newpage);
1644 copy_highpage(newpage, oldpage);
1645 flush_dcache_page(newpage);
1646
1647 __SetPageLocked(newpage);
1648 __SetPageSwapBacked(newpage);
1649 SetPageUptodate(newpage);
1650 set_page_private(newpage, entry.val);
1651 SetPageSwapCache(newpage);
1652
1653 /*
1654 * Our caller will very soon move newpage out of swapcache, but it's
1655 * a nice clean interface for us to replace oldpage by newpage there.
1656 */
1657 xa_lock_irq(&swap_mapping->i_pages);
1658 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1659 if (!error) {
1660 mem_cgroup_migrate(oldpage, newpage);
1661 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1662 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1663 }
1664 xa_unlock_irq(&swap_mapping->i_pages);
1665
1666 if (unlikely(error)) {
1667 /*
1668 * Is this possible? I think not, now that our callers check
1669 * both PageSwapCache and page_private after getting page lock;
1670 * but be defensive. Reverse old to newpage for clear and free.
1671 */
1672 oldpage = newpage;
1673 } else {
1674 lru_cache_add(newpage);
1675 *pagep = newpage;
1676 }
1677
1678 ClearPageSwapCache(oldpage);
1679 set_page_private(oldpage, 0);
1680
1681 unlock_page(oldpage);
1682 put_page(oldpage);
1683 put_page(oldpage);
1684 return error;
1685}
1686
1687/*
1688 * Swap in the page pointed to by *pagep.
1689 * Caller has to make sure that *pagep contains a valid swapped page.
1690 * Returns 0 and the page in pagep if success. On failure, returns the
1691 * error code and NULL in *pagep.
1692 */
1693static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1694 struct page **pagep, enum sgp_type sgp,
1695 gfp_t gfp, struct vm_area_struct *vma,
1696 vm_fault_t *fault_type)
1697{
1698 struct address_space *mapping = inode->i_mapping;
1699 struct shmem_inode_info *info = SHMEM_I(inode);
1700 struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
1701 struct page *page;
1702 swp_entry_t swap;
1703 int error;
1704
1705 VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1706 swap = radix_to_swp_entry(*pagep);
1707 *pagep = NULL;
1708
1709 /* Look it up and read it in.. */
1710 page = lookup_swap_cache(swap, NULL, 0);
1711 if (!page) {
1712 /* Or update major stats only when swapin succeeds?? */
1713 if (fault_type) {
1714 *fault_type |= VM_FAULT_MAJOR;
1715 count_vm_event(PGMAJFAULT);
1716 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1717 }
1718 /* Here we actually start the io */
1719 page = shmem_swapin(swap, gfp, info, index);
1720 if (!page) {
1721 error = -ENOMEM;
1722 goto failed;
1723 }
1724 }
1725
1726 /* We have to do this with page locked to prevent races */
1727 lock_page(page);
1728 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1729 !shmem_confirm_swap(mapping, index, swap)) {
1730 error = -EEXIST;
1731 goto unlock;
1732 }
1733 if (!PageUptodate(page)) {
1734 error = -EIO;
1735 goto failed;
1736 }
1737 wait_on_page_writeback(page);
1738
1739 if (shmem_should_replace_page(page, gfp)) {
1740 error = shmem_replace_page(&page, gfp, info, index);
1741 if (error)
1742 goto failed;
1743 }
1744
1745 error = shmem_add_to_page_cache(page, mapping, index,
1746 swp_to_radix_entry(swap), gfp,
1747 charge_mm);
1748 if (error)
1749 goto failed;
1750
1751 spin_lock_irq(&info->lock);
1752 info->swapped--;
1753 shmem_recalc_inode(inode);
1754 spin_unlock_irq(&info->lock);
1755
1756 if (sgp == SGP_WRITE)
1757 mark_page_accessed(page);
1758
1759 delete_from_swap_cache(page);
1760 set_page_dirty(page);
1761 swap_free(swap);
1762
1763 *pagep = page;
1764 return 0;
1765failed:
1766 if (!shmem_confirm_swap(mapping, index, swap))
1767 error = -EEXIST;
1768unlock:
1769 if (page) {
1770 unlock_page(page);
1771 put_page(page);
1772 }
1773
1774 return error;
1775}
1776
1777/*
1778 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1779 *
1780 * If we allocate a new one we do not mark it dirty. That's up to the
1781 * vm. If we swap it in we mark it dirty since we also free the swap
1782 * entry since a page cannot live in both the swap and page cache.
1783 *
1784 * vmf and fault_type are only supplied by shmem_fault:
1785 * otherwise they are NULL.
1786 */
1787static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1788 struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1789 struct vm_area_struct *vma, struct vm_fault *vmf,
1790 vm_fault_t *fault_type)
1791{
1792 struct address_space *mapping = inode->i_mapping;
1793 struct shmem_inode_info *info = SHMEM_I(inode);
1794 struct shmem_sb_info *sbinfo;
1795 struct mm_struct *charge_mm;
1796 struct page *page;
1797 enum sgp_type sgp_huge = sgp;
1798 pgoff_t hindex = index;
1799 int error;
1800 int once = 0;
1801 int alloced = 0;
1802
1803 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1804 return -EFBIG;
1805 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1806 sgp = SGP_CACHE;
1807repeat:
1808 if (sgp <= SGP_CACHE &&
1809 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1810 return -EINVAL;
1811 }
1812
1813 sbinfo = SHMEM_SB(inode->i_sb);
1814 charge_mm = vma ? vma->vm_mm : current->mm;
1815
1816 page = find_lock_entry(mapping, index);
1817 if (xa_is_value(page)) {
1818 error = shmem_swapin_page(inode, index, &page,
1819 sgp, gfp, vma, fault_type);
1820 if (error == -EEXIST)
1821 goto repeat;
1822
1823 *pagep = page;
1824 return error;
1825 }
1826
1827 if (page && sgp == SGP_WRITE)
1828 mark_page_accessed(page);
1829
1830 /* fallocated page? */
1831 if (page && !PageUptodate(page)) {
1832 if (sgp != SGP_READ)
1833 goto clear;
1834 unlock_page(page);
1835 put_page(page);
1836 page = NULL;
1837 }
1838 if (page || sgp == SGP_READ) {
1839 *pagep = page;
1840 return 0;
1841 }
1842
1843 /*
1844 * Fast cache lookup did not find it:
1845 * bring it back from swap or allocate.
1846 */
1847
1848 if (vma && userfaultfd_missing(vma)) {
1849 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1850 return 0;
1851 }
1852
1853 /* shmem_symlink() */
1854 if (mapping->a_ops != &shmem_aops)
1855 goto alloc_nohuge;
1856 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1857 goto alloc_nohuge;
1858 if (shmem_huge == SHMEM_HUGE_FORCE)
1859 goto alloc_huge;
1860 switch (sbinfo->huge) {
1861 case SHMEM_HUGE_NEVER:
1862 goto alloc_nohuge;
1863 case SHMEM_HUGE_WITHIN_SIZE: {
1864 loff_t i_size;
1865 pgoff_t off;
1866
1867 off = round_up(index, HPAGE_PMD_NR);
1868 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1869 if (i_size >= HPAGE_PMD_SIZE &&
1870 i_size >> PAGE_SHIFT >= off)
1871 goto alloc_huge;
1872
1873 fallthrough;
1874 }
1875 case SHMEM_HUGE_ADVISE:
1876 if (sgp_huge == SGP_HUGE)
1877 goto alloc_huge;
1878 /* TODO: implement fadvise() hints */
1879 goto alloc_nohuge;
1880 }
1881
1882alloc_huge:
1883 page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1884 if (IS_ERR(page)) {
1885alloc_nohuge:
1886 page = shmem_alloc_and_acct_page(gfp, inode,
1887 index, false);
1888 }
1889 if (IS_ERR(page)) {
1890 int retry = 5;
1891
1892 error = PTR_ERR(page);
1893 page = NULL;
1894 if (error != -ENOSPC)
1895 goto unlock;
1896 /*
1897 * Try to reclaim some space by splitting a huge page
1898 * beyond i_size on the filesystem.
1899 */
1900 while (retry--) {
1901 int ret;
1902
1903 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1904 if (ret == SHRINK_STOP)
1905 break;
1906 if (ret)
1907 goto alloc_nohuge;
1908 }
1909 goto unlock;
1910 }
1911
1912 if (PageTransHuge(page))
1913 hindex = round_down(index, HPAGE_PMD_NR);
1914 else
1915 hindex = index;
1916
1917 if (sgp == SGP_WRITE)
1918 __SetPageReferenced(page);
1919
1920 error = shmem_add_to_page_cache(page, mapping, hindex,
1921 NULL, gfp & GFP_RECLAIM_MASK,
1922 charge_mm);
1923 if (error)
1924 goto unacct;
1925 lru_cache_add(page);
1926
1927 spin_lock_irq(&info->lock);
1928 info->alloced += compound_nr(page);
1929 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1930 shmem_recalc_inode(inode);
1931 spin_unlock_irq(&info->lock);
1932 alloced = true;
1933
1934 if (PageTransHuge(page) &&
1935 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1936 hindex + HPAGE_PMD_NR - 1) {
1937 /*
1938 * Part of the huge page is beyond i_size: subject
1939 * to shrink under memory pressure.
1940 */
1941 spin_lock(&sbinfo->shrinklist_lock);
1942 /*
1943 * _careful to defend against unlocked access to
1944 * ->shrink_list in shmem_unused_huge_shrink()
1945 */
1946 if (list_empty_careful(&info->shrinklist)) {
1947 list_add_tail(&info->shrinklist,
1948 &sbinfo->shrinklist);
1949 sbinfo->shrinklist_len++;
1950 }
1951 spin_unlock(&sbinfo->shrinklist_lock);
1952 }
1953
1954 /*
1955 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1956 */
1957 if (sgp == SGP_FALLOC)
1958 sgp = SGP_WRITE;
1959clear:
1960 /*
1961 * Let SGP_WRITE caller clear ends if write does not fill page;
1962 * but SGP_FALLOC on a page fallocated earlier must initialize
1963 * it now, lest undo on failure cancel our earlier guarantee.
1964 */
1965 if (sgp != SGP_WRITE && !PageUptodate(page)) {
1966 struct page *head = compound_head(page);
1967 int i;
1968
1969 for (i = 0; i < compound_nr(head); i++) {
1970 clear_highpage(head + i);
1971 flush_dcache_page(head + i);
1972 }
1973 SetPageUptodate(head);
1974 }
1975
1976 /* Perhaps the file has been truncated since we checked */
1977 if (sgp <= SGP_CACHE &&
1978 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1979 if (alloced) {
1980 ClearPageDirty(page);
1981 delete_from_page_cache(page);
1982 spin_lock_irq(&info->lock);
1983 shmem_recalc_inode(inode);
1984 spin_unlock_irq(&info->lock);
1985 }
1986 error = -EINVAL;
1987 goto unlock;
1988 }
1989 *pagep = page + index - hindex;
1990 return 0;
1991
1992 /*
1993 * Error recovery.
1994 */
1995unacct:
1996 shmem_inode_unacct_blocks(inode, compound_nr(page));
1997
1998 if (PageTransHuge(page)) {
1999 unlock_page(page);
2000 put_page(page);
2001 goto alloc_nohuge;
2002 }
2003unlock:
2004 if (page) {
2005 unlock_page(page);
2006 put_page(page);
2007 }
2008 if (error == -ENOSPC && !once++) {
2009 spin_lock_irq(&info->lock);
2010 shmem_recalc_inode(inode);
2011 spin_unlock_irq(&info->lock);
2012 goto repeat;
2013 }
2014 if (error == -EEXIST)
2015 goto repeat;
2016 return error;
2017}
2018
2019/*
2020 * This is like autoremove_wake_function, but it removes the wait queue
2021 * entry unconditionally - even if something else had already woken the
2022 * target.
2023 */
2024static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2025{
2026 int ret = default_wake_function(wait, mode, sync, key);
2027 list_del_init(&wait->entry);
2028 return ret;
2029}
2030
2031static vm_fault_t shmem_fault(struct vm_fault *vmf)
2032{
2033 struct vm_area_struct *vma = vmf->vma;
2034 struct inode *inode = file_inode(vma->vm_file);
2035 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2036 enum sgp_type sgp;
2037 int err;
2038 vm_fault_t ret = VM_FAULT_LOCKED;
2039
2040 /*
2041 * Trinity finds that probing a hole which tmpfs is punching can
2042 * prevent the hole-punch from ever completing: which in turn
2043 * locks writers out with its hold on i_mutex. So refrain from
2044 * faulting pages into the hole while it's being punched. Although
2045 * shmem_undo_range() does remove the additions, it may be unable to
2046 * keep up, as each new page needs its own unmap_mapping_range() call,
2047 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2048 *
2049 * It does not matter if we sometimes reach this check just before the
2050 * hole-punch begins, so that one fault then races with the punch:
2051 * we just need to make racing faults a rare case.
2052 *
2053 * The implementation below would be much simpler if we just used a
2054 * standard mutex or completion: but we cannot take i_mutex in fault,
2055 * and bloating every shmem inode for this unlikely case would be sad.
2056 */
2057 if (unlikely(inode->i_private)) {
2058 struct shmem_falloc *shmem_falloc;
2059
2060 spin_lock(&inode->i_lock);
2061 shmem_falloc = inode->i_private;
2062 if (shmem_falloc &&
2063 shmem_falloc->waitq &&
2064 vmf->pgoff >= shmem_falloc->start &&
2065 vmf->pgoff < shmem_falloc->next) {
2066 struct file *fpin;
2067 wait_queue_head_t *shmem_falloc_waitq;
2068 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2069
2070 ret = VM_FAULT_NOPAGE;
2071 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2072 if (fpin)
2073 ret = VM_FAULT_RETRY;
2074
2075 shmem_falloc_waitq = shmem_falloc->waitq;
2076 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2077 TASK_UNINTERRUPTIBLE);
2078 spin_unlock(&inode->i_lock);
2079 schedule();
2080
2081 /*
2082 * shmem_falloc_waitq points into the shmem_fallocate()
2083 * stack of the hole-punching task: shmem_falloc_waitq
2084 * is usually invalid by the time we reach here, but
2085 * finish_wait() does not dereference it in that case;
2086 * though i_lock needed lest racing with wake_up_all().
2087 */
2088 spin_lock(&inode->i_lock);
2089 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2090 spin_unlock(&inode->i_lock);
2091
2092 if (fpin)
2093 fput(fpin);
2094 return ret;
2095 }
2096 spin_unlock(&inode->i_lock);
2097 }
2098
2099 sgp = SGP_CACHE;
2100
2101 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2102 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2103 sgp = SGP_NOHUGE;
2104 else if (vma->vm_flags & VM_HUGEPAGE)
2105 sgp = SGP_HUGE;
2106
2107 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2108 gfp, vma, vmf, &ret);
2109 if (err)
2110 return vmf_error(err);
2111 return ret;
2112}
2113
2114unsigned long shmem_get_unmapped_area(struct file *file,
2115 unsigned long uaddr, unsigned long len,
2116 unsigned long pgoff, unsigned long flags)
2117{
2118 unsigned long (*get_area)(struct file *,
2119 unsigned long, unsigned long, unsigned long, unsigned long);
2120 unsigned long addr;
2121 unsigned long offset;
2122 unsigned long inflated_len;
2123 unsigned long inflated_addr;
2124 unsigned long inflated_offset;
2125
2126 if (len > TASK_SIZE)
2127 return -ENOMEM;
2128
2129 get_area = current->mm->get_unmapped_area;
2130 addr = get_area(file, uaddr, len, pgoff, flags);
2131
2132 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2133 return addr;
2134 if (IS_ERR_VALUE(addr))
2135 return addr;
2136 if (addr & ~PAGE_MASK)
2137 return addr;
2138 if (addr > TASK_SIZE - len)
2139 return addr;
2140
2141 if (shmem_huge == SHMEM_HUGE_DENY)
2142 return addr;
2143 if (len < HPAGE_PMD_SIZE)
2144 return addr;
2145 if (flags & MAP_FIXED)
2146 return addr;
2147 /*
2148 * Our priority is to support MAP_SHARED mapped hugely;
2149 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2150 * But if caller specified an address hint and we allocated area there
2151 * successfully, respect that as before.
2152 */
2153 if (uaddr == addr)
2154 return addr;
2155
2156 if (shmem_huge != SHMEM_HUGE_FORCE) {
2157 struct super_block *sb;
2158
2159 if (file) {
2160 VM_BUG_ON(file->f_op != &shmem_file_operations);
2161 sb = file_inode(file)->i_sb;
2162 } else {
2163 /*
2164 * Called directly from mm/mmap.c, or drivers/char/mem.c
2165 * for "/dev/zero", to create a shared anonymous object.
2166 */
2167 if (IS_ERR(shm_mnt))
2168 return addr;
2169 sb = shm_mnt->mnt_sb;
2170 }
2171 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2172 return addr;
2173 }
2174
2175 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2176 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2177 return addr;
2178 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2179 return addr;
2180
2181 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2182 if (inflated_len > TASK_SIZE)
2183 return addr;
2184 if (inflated_len < len)
2185 return addr;
2186
2187 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2188 if (IS_ERR_VALUE(inflated_addr))
2189 return addr;
2190 if (inflated_addr & ~PAGE_MASK)
2191 return addr;
2192
2193 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2194 inflated_addr += offset - inflated_offset;
2195 if (inflated_offset > offset)
2196 inflated_addr += HPAGE_PMD_SIZE;
2197
2198 if (inflated_addr > TASK_SIZE - len)
2199 return addr;
2200 return inflated_addr;
2201}
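/*
 * Worked example of the address inflation in shmem_get_unmapped_area()
 * above (editor's note, with 2MiB HPAGE_PMD_SIZE and a hypothetical
 * hint-free 4MiB MAP_SHARED request at pgoff 0): inflated_len is
 * 4MiB + 2MiB - 4KiB.  If the second get_area() call returns, say,
 * 0x7f1234601000, then inflated_offset = 0x1000; subtracting it yields the
 * 2MiB-aligned 0x7f1234600000, which lies below the area we were offered,
 * so HPAGE_PMD_SIZE is added back and 0x7f1234800000 is returned.  The
 * final 4MiB mapping then ends at 0x7f1234c00000, exactly the end of the
 * inflated reservation.
 */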
2202
2203#ifdef CONFIG_NUMA
2204static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2205{
2206 struct inode *inode = file_inode(vma->vm_file);
2207 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2208}
2209
2210static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2211 unsigned long addr)
2212{
2213 struct inode *inode = file_inode(vma->vm_file);
2214 pgoff_t index;
2215
2216 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2217 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2218}
2219#endif
2220
2221int shmem_lock(struct file *file, int lock, struct user_struct *user)
2222{
2223 struct inode *inode = file_inode(file);
2224 struct shmem_inode_info *info = SHMEM_I(inode);
2225 int retval = -ENOMEM;
2226
2227 /*
2228 * What serializes the accesses to info->flags?
2229 * ipc_lock_object() when called from shmctl_do_lock(),
2230 * no serialization needed when called from shm_destroy().
2231 */
2232 if (lock && !(info->flags & VM_LOCKED)) {
2233 if (!user_shm_lock(inode->i_size, user))
2234 goto out_nomem;
2235 info->flags |= VM_LOCKED;
2236 mapping_set_unevictable(file->f_mapping);
2237 }
2238 if (!lock && (info->flags & VM_LOCKED) && user) {
2239 user_shm_unlock(inode->i_size, user);
2240 info->flags &= ~VM_LOCKED;
2241 mapping_clear_unevictable(file->f_mapping);
2242 }
2243 retval = 0;
2244
2245out_nomem:
2246 return retval;
2247}
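/*
 * Editor's illustration (userspace sketch, not part of this file):
 * shmem_lock() is reached from SysV shm, e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	(sets VM_LOCKED above)
 *	...
 *	shmctl(id, SHM_UNLOCK, NULL);
 *
 * user_shm_lock() is what enforces RLIMIT_MEMLOCK / CAP_IPC_LOCK for the
 * SHM_LOCK case.
 */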
2248
2249static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2250{
2251 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2252
2253 if (info->seals & F_SEAL_FUTURE_WRITE) {
2254 /*
2255 * New PROT_WRITE and MAP_SHARED mmaps are not allowed
2256 * when the "future write" seal is active.
2257 */
2258 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2259 return -EPERM;
2260
2261 /*
2262 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
2263 * MAP_SHARED and read-only, take care to not allow mprotect to
2264 * revert protections on such mappings. Do this only for shared
2265 * mappings. For private mappings, we don't need to mask
2266 * VM_MAYWRITE, as we still want them to be COW-writable.
2267 */
2268 if (vma->vm_flags & VM_SHARED)
2269 vma->vm_flags &= ~(VM_MAYWRITE);
2270 }
2271
2272 file_accessed(file);
2273 vma->vm_ops = &shmem_vm_ops;
2274 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2275 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2276 (vma->vm_end & HPAGE_PMD_MASK)) {
2277 khugepaged_enter(vma, vma->vm_flags);
2278 }
2279 return 0;
2280}
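/*
 * Editor's illustration (userspace sketch, not part of this file) of the
 * F_SEAL_FUTURE_WRITE checks in shmem_mmap() above:
 *
 *	int fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	ftruncate(fd, 1 << 20);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
 *
 *	mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)
 *		now fails with EPERM;
 *	mmap(NULL, 1 << 20, PROT_READ, MAP_SHARED, fd, 0)
 *		succeeds, and a later mprotect(PROT_WRITE) is refused because
 *		VM_MAYWRITE was cleared;
 *	mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0)
 *		is still allowed, since private mappings stay COW-writable.
 */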
2281
2282static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2283 umode_t mode, dev_t dev, unsigned long flags)
2284{
2285 struct inode *inode;
2286 struct shmem_inode_info *info;
2287 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2288 ino_t ino;
2289
2290 if (shmem_reserve_inode(sb, &ino))
2291 return NULL;
2292
2293 inode = new_inode(sb);
2294 if (inode) {
2295 inode->i_ino = ino;
2296 inode_init_owner(inode, dir, mode);
2297 inode->i_blocks = 0;
2298 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2299 inode->i_generation = prandom_u32();
2300 info = SHMEM_I(inode);
2301 memset(info, 0, (char *)inode - (char *)info);
2302 spin_lock_init(&info->lock);
2303 atomic_set(&info->stop_eviction, 0);
2304 info->seals = F_SEAL_SEAL;
2305 info->flags = flags & VM_NORESERVE;
2306 INIT_LIST_HEAD(&info->shrinklist);
2307 INIT_LIST_HEAD(&info->swaplist);
2308 simple_xattrs_init(&info->xattrs);
2309 cache_no_acl(inode);
2310
2311 switch (mode & S_IFMT) {
2312 default:
2313 inode->i_op = &shmem_special_inode_operations;
2314 init_special_inode(inode, mode, dev);
2315 break;
2316 case S_IFREG:
2317 inode->i_mapping->a_ops = &shmem_aops;
2318 inode->i_op = &shmem_inode_operations;
2319 inode->i_fop = &shmem_file_operations;
2320 mpol_shared_policy_init(&info->policy,
2321 shmem_get_sbmpol(sbinfo));
2322 break;
2323 case S_IFDIR:
2324 inc_nlink(inode);
2325 /* Some things misbehave if size == 0 on a directory */
2326 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2327 inode->i_op = &shmem_dir_inode_operations;
2328 inode->i_fop = &simple_dir_operations;
2329 break;
2330 case S_IFLNK:
2331 /*
2332 * Must not load anything into the rbtree:
2333 * mpol_free_shared_policy() won't be called for symlinks.
2334 */
2335 mpol_shared_policy_init(&info->policy, NULL);
2336 break;
2337 }
2338
2339 lockdep_annotate_inode_mutex_key(inode);
2340 } else
2341 shmem_free_inode(sb);
2342 return inode;
2343}
2344
2345bool shmem_mapping(struct address_space *mapping)
2346{
2347 return mapping->a_ops == &shmem_aops;
2348}
2349
2350static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2351 pmd_t *dst_pmd,
2352 struct vm_area_struct *dst_vma,
2353 unsigned long dst_addr,
2354 unsigned long src_addr,
2355 bool zeropage,
2356 struct page **pagep)
2357{
2358 struct inode *inode = file_inode(dst_vma->vm_file);
2359 struct shmem_inode_info *info = SHMEM_I(inode);
2360 struct address_space *mapping = inode->i_mapping;
2361 gfp_t gfp = mapping_gfp_mask(mapping);
2362 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2363 spinlock_t *ptl;
2364 void *page_kaddr;
2365 struct page *page;
2366 pte_t _dst_pte, *dst_pte;
2367 int ret;
2368 pgoff_t offset, max_off;
2369
2370 ret = -ENOMEM;
2371 if (!shmem_inode_acct_block(inode, 1))
2372 goto out;
2373
2374 if (!*pagep) {
2375 page = shmem_alloc_page(gfp, info, pgoff);
2376 if (!page)
2377 goto out_unacct_blocks;
2378
2379 if (!zeropage) { /* mcopy_atomic */
2380 page_kaddr = kmap_atomic(page);
2381 ret = copy_from_user(page_kaddr,
2382 (const void __user *)src_addr,
2383 PAGE_SIZE);
2384 kunmap_atomic(page_kaddr);
2385
2386 /* fallback to copy_from_user outside mmap_lock */
2387 if (unlikely(ret)) {
2388 *pagep = page;
2389 shmem_inode_unacct_blocks(inode, 1);
2390 /* don't free the page */
2391 return -ENOENT;
2392 }
2393 } else { /* mfill_zeropage_atomic */
2394 clear_highpage(page);
2395 }
2396 } else {
2397 page = *pagep;
2398 *pagep = NULL;
2399 }
2400
2401 VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2402 __SetPageLocked(page);
2403 __SetPageSwapBacked(page);
2404 __SetPageUptodate(page);
2405
2406 ret = -EFAULT;
2407 offset = linear_page_index(dst_vma, dst_addr);
2408 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2409 if (unlikely(offset >= max_off))
2410 goto out_release;
2411
2412 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2413 gfp & GFP_RECLAIM_MASK, dst_mm);
2414 if (ret)
2415 goto out_release;
2416
2417 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2418 if (dst_vma->vm_flags & VM_WRITE)
2419 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2420 else {
2421 /*
2422 * We don't set the pte dirty if the vma has no
2423 * VM_WRITE permission, so mark the page dirty or it
2424 * could be freed from under us. We could do it
2425 * unconditionally before unlock_page(), but doing it
2426 * only if VM_WRITE is not set is faster.
2427 */
2428 set_page_dirty(page);
2429 }
2430
2431 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2432
2433 ret = -EFAULT;
2434 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2435 if (unlikely(offset >= max_off))
2436 goto out_release_unlock;
2437
2438 ret = -EEXIST;
2439 if (!pte_none(*dst_pte))
2440 goto out_release_unlock;
2441
2442 lru_cache_add(page);
2443
2444 spin_lock_irq(&info->lock);
2445 info->alloced++;
2446 inode->i_blocks += BLOCKS_PER_PAGE;
2447 shmem_recalc_inode(inode);
2448 spin_unlock_irq(&info->lock);
2449
2450 inc_mm_counter(dst_mm, mm_counter_file(page));
2451 page_add_file_rmap(page, false);
2452 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2453
2454 /* No need to invalidate - it was non-present before */
2455 update_mmu_cache(dst_vma, dst_addr, dst_pte);
2456 pte_unmap_unlock(dst_pte, ptl);
2457 unlock_page(page);
2458 ret = 0;
2459out:
2460 return ret;
2461out_release_unlock:
2462 pte_unmap_unlock(dst_pte, ptl);
2463 ClearPageDirty(page);
2464 delete_from_page_cache(page);
2465out_release:
2466 unlock_page(page);
2467 put_page(page);
2468out_unacct_blocks:
2469 shmem_inode_unacct_blocks(inode, 1);
2470 goto out;
2471}
2472
2473int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2474 pmd_t *dst_pmd,
2475 struct vm_area_struct *dst_vma,
2476 unsigned long dst_addr,
2477 unsigned long src_addr,
2478 struct page **pagep)
2479{
2480 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2481 dst_addr, src_addr, false, pagep);
2482}
2483
2484int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2485 pmd_t *dst_pmd,
2486 struct vm_area_struct *dst_vma,
2487 unsigned long dst_addr)
2488{
2489 struct page *page = NULL;
2490
2491 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2492 dst_addr, 0, true, &page);
2493}
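/*
 * Editor's illustration (userspace sketch, not part of this file): the two
 * helpers above back UFFDIO_COPY and UFFDIO_ZEROPAGE for userfaultfd
 * registrations on shmem VMAs, roughly
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(PAGE_SIZE - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = PAGE_SIZE,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * When copy_from_user() fails while mmap_lock is held, -ENOENT is returned
 * and the caller retries the copy into *pagep outside the lock, as noted in
 * shmem_mfill_atomic_pte().
 */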
2494
2495#ifdef CONFIG_TMPFS
2496static const struct inode_operations shmem_symlink_inode_operations;
2497static const struct inode_operations shmem_short_symlink_operations;
2498
2499#ifdef CONFIG_TMPFS_XATTR
2500static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2501#else
2502#define shmem_initxattrs NULL
2503#endif
2504
2505static int
2506shmem_write_begin(struct file *file, struct address_space *mapping,
2507 loff_t pos, unsigned len, unsigned flags,
2508 struct page **pagep, void **fsdata)
2509{
2510 struct inode *inode = mapping->host;
2511 struct shmem_inode_info *info = SHMEM_I(inode);
2512 pgoff_t index = pos >> PAGE_SHIFT;
2513
2514 /* i_mutex is held by caller */
2515 if (unlikely(info->seals & (F_SEAL_GROW |
2516 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2517 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2518 return -EPERM;
2519 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2520 return -EPERM;
2521 }
2522
2523 return shmem_getpage(inode, index, pagep, SGP_WRITE);
2524}
2525
2526static int
2527shmem_write_end(struct file *file, struct address_space *mapping,
2528 loff_t pos, unsigned len, unsigned copied,
2529 struct page *page, void *fsdata)
2530{
2531 struct inode *inode = mapping->host;
2532
2533 if (pos + copied > inode->i_size)
2534 i_size_write(inode, pos + copied);
2535
2536 if (!PageUptodate(page)) {
2537 struct page *head = compound_head(page);
2538 if (PageTransCompound(page)) {
2539 int i;
2540
2541 for (i = 0; i < HPAGE_PMD_NR; i++) {
2542 if (head + i == page)
2543 continue;
2544 clear_highpage(head + i);
2545 flush_dcache_page(head + i);
2546 }
2547 }
2548 if (copied < PAGE_SIZE) {
2549 unsigned from = pos & (PAGE_SIZE - 1);
2550 zero_user_segments(page, 0, from,
2551 from + copied, PAGE_SIZE);
2552 }
2553 SetPageUptodate(head);
2554 }
2555 set_page_dirty(page);
2556 unlock_page(page);
2557 put_page(page);
2558
2559 return copied;
2560}
2561
2562static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2563{
2564 struct file *file = iocb->ki_filp;
2565 struct inode *inode = file_inode(file);
2566 struct address_space *mapping = inode->i_mapping;
2567 pgoff_t index;
2568 unsigned long offset;
2569 enum sgp_type sgp = SGP_READ;
2570 int error = 0;
2571 ssize_t retval = 0;
2572 loff_t *ppos = &iocb->ki_pos;
2573
2574 /*
2575 * Might this read be for a stacking filesystem? Then when reading
2576 * holes of a sparse file, we actually need to allocate those pages,
2577 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2578 */
2579 if (!iter_is_iovec(to))
2580 sgp = SGP_CACHE;
2581
2582 index = *ppos >> PAGE_SHIFT;
2583 offset = *ppos & ~PAGE_MASK;
2584
2585 for (;;) {
2586 struct page *page = NULL;
2587 pgoff_t end_index;
2588 unsigned long nr, ret;
2589 loff_t i_size = i_size_read(inode);
2590
2591 end_index = i_size >> PAGE_SHIFT;
2592 if (index > end_index)
2593 break;
2594 if (index == end_index) {
2595 nr = i_size & ~PAGE_MASK;
2596 if (nr <= offset)
2597 break;
2598 }
2599
2600 error = shmem_getpage(inode, index, &page, sgp);
2601 if (error) {
2602 if (error == -EINVAL)
2603 error = 0;
2604 break;
2605 }
2606 if (page) {
2607 if (sgp == SGP_CACHE)
2608 set_page_dirty(page);
2609 unlock_page(page);
2610 }
2611
2612 /*
2613 * We must re-evaluate i_size afterwards, since reads (unlike
2614 * writes) are called without i_mutex protection against truncate.
2615 */
2616 nr = PAGE_SIZE;
2617 i_size = i_size_read(inode);
2618 end_index = i_size >> PAGE_SHIFT;
2619 if (index == end_index) {
2620 nr = i_size & ~PAGE_MASK;
2621 if (nr <= offset) {
2622 if (page)
2623 put_page(page);
2624 break;
2625 }
2626 }
2627 nr -= offset;
2628
2629 if (page) {
2630 /*
2631 * If users can be writing to this page using arbitrary
2632 * virtual addresses, take care about potential aliasing
2633 * before reading the page on the kernel side.
2634 */
2635 if (mapping_writably_mapped(mapping))
2636 flush_dcache_page(page);
2637 /*
2638 * Mark the page accessed if we read the beginning.
2639 */
2640 if (!offset)
2641 mark_page_accessed(page);
2642 } else {
2643 page = ZERO_PAGE(0);
2644 get_page(page);
2645 }
2646
2647 /*
2648 * Ok, we have the page, and it's up-to-date, so
2649 * now we can copy it to user space...
2650 */
2651 ret = copy_page_to_iter(page, offset, nr, to);
2652 retval += ret;
2653 offset += ret;
2654 index += offset >> PAGE_SHIFT;
2655 offset &= ~PAGE_MASK;
2656
2657 put_page(page);
2658 if (!iov_iter_count(to))
2659 break;
2660 if (ret < nr) {
2661 error = -EFAULT;
2662 break;
2663 }
2664 cond_resched();
2665 }
2666
2667 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2668 file_accessed(file);
2669 return retval ? retval : error;
2670}
2671
2672/*
2673 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2674 */
2675static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2676 pgoff_t index, pgoff_t end, int whence)
2677{
2678 struct page *page;
2679 struct pagevec pvec;
2680 pgoff_t indices[PAGEVEC_SIZE];
2681 bool done = false;
2682 int i;
2683
2684 pagevec_init(&pvec);
2685 pvec.nr = 1; /* start small: we may be there already */
2686 while (!done) {
2687 pvec.nr = find_get_entries(mapping, index,
2688 pvec.nr, pvec.pages, indices);
2689 if (!pvec.nr) {
2690 if (whence == SEEK_DATA)
2691 index = end;
2692 break;
2693 }
2694 for (i = 0; i < pvec.nr; i++, index++) {
2695 if (index < indices[i]) {
2696 if (whence == SEEK_HOLE) {
2697 done = true;
2698 break;
2699 }
2700 index = indices[i];
2701 }
2702 page = pvec.pages[i];
2703 if (page && !xa_is_value(page)) {
2704 if (!PageUptodate(page))
2705 page = NULL;
2706 }
2707 if (index >= end ||
2708 (page && whence == SEEK_DATA) ||
2709 (!page && whence == SEEK_HOLE)) {
2710 done = true;
2711 break;
2712 }
2713 }
2714 pagevec_remove_exceptionals(&pvec);
2715 pagevec_release(&pvec);
2716 pvec.nr = PAGEVEC_SIZE;
2717 cond_resched();
2718 }
2719 return index;
2720}
2721
2722static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2723{
2724 struct address_space *mapping = file->f_mapping;
2725 struct inode *inode = mapping->host;
2726 pgoff_t start, end;
2727 loff_t new_offset;
2728
2729 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2730 return generic_file_llseek_size(file, offset, whence,
2731 MAX_LFS_FILESIZE, i_size_read(inode));
2732 inode_lock(inode);
2733 /* We're holding i_mutex so we can access i_size directly */
2734
2735 if (offset < 0 || offset >= inode->i_size)
2736 offset = -ENXIO;
2737 else {
2738 start = offset >> PAGE_SHIFT;
2739 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2740 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2741 new_offset <<= PAGE_SHIFT;
2742 if (new_offset > offset) {
2743 if (new_offset < inode->i_size)
2744 offset = new_offset;
2745 else if (whence == SEEK_DATA)
2746 offset = -ENXIO;
2747 else
2748 offset = inode->i_size;
2749 }
2750 }
2751
2752 if (offset >= 0)
2753 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2754 inode_unlock(inode);
2755 return offset;
2756}
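/*
 * Editor's illustration (userspace sketch, not part of this file) of the
 * SEEK_DATA/SEEK_HOLE support above, walking the allocated extents of a
 * sparse tmpfs file:
 *
 *	off_t data = 0, hole;
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data [%lld, %lld)\n", (long long)data, (long long)hole);
 *		data = hole;
 *	}
 *	(lseek() fails with ENXIO once there is no more data before EOF)
 *
 * Note that only Uptodate pages count as data here: pages merely reserved
 * by fallocate() and never written still read back as a hole.
 */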
2757
2758static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2759 loff_t len)
2760{
2761 struct inode *inode = file_inode(file);
2762 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2763 struct shmem_inode_info *info = SHMEM_I(inode);
2764 struct shmem_falloc shmem_falloc;
2765 pgoff_t start, index, end;
2766 int error;
2767
2768 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2769 return -EOPNOTSUPP;
2770
2771 inode_lock(inode);
2772
2773 if (mode & FALLOC_FL_PUNCH_HOLE) {
2774 struct address_space *mapping = file->f_mapping;
2775 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2776 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2777 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2778
2779 /* protected by i_mutex */
2780 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2781 error = -EPERM;
2782 goto out;
2783 }
2784
2785 shmem_falloc.waitq = &shmem_falloc_waitq;
2786 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2787 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2788 spin_lock(&inode->i_lock);
2789 inode->i_private = &shmem_falloc;
2790 spin_unlock(&inode->i_lock);
2791
2792 if ((u64)unmap_end > (u64)unmap_start)
2793 unmap_mapping_range(mapping, unmap_start,
2794 1 + unmap_end - unmap_start, 0);
2795 shmem_truncate_range(inode, offset, offset + len - 1);
2796 /* No need to unmap again: hole-punching leaves COWed pages */
2797
2798 spin_lock(&inode->i_lock);
2799 inode->i_private = NULL;
2800 wake_up_all(&shmem_falloc_waitq);
2801 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2802 spin_unlock(&inode->i_lock);
2803 error = 0;
2804 goto out;
2805 }
2806
2807 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2808 error = inode_newsize_ok(inode, offset + len);
2809 if (error)
2810 goto out;
2811
2812 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2813 error = -EPERM;
2814 goto out;
2815 }
2816
2817 start = offset >> PAGE_SHIFT;
2818 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2819 /* Try to avoid a swapstorm if len is impossible to satisfy */
2820 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2821 error = -ENOSPC;
2822 goto out;
2823 }
2824
2825 shmem_falloc.waitq = NULL;
2826 shmem_falloc.start = start;
2827 shmem_falloc.next = start;
2828 shmem_falloc.nr_falloced = 0;
2829 shmem_falloc.nr_unswapped = 0;
2830 spin_lock(&inode->i_lock);
2831 inode->i_private = &shmem_falloc;
2832 spin_unlock(&inode->i_lock);
2833
2834 for (index = start; index < end; index++) {
2835 struct page *page;
2836
2837 /*
2838 * Good, the fallocate(2) manpage permits EINTR: we may have
2839 * been interrupted because we are using up too much memory.
2840 */
2841 if (signal_pending(current))
2842 error = -EINTR;
2843 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2844 error = -ENOMEM;
2845 else
2846 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2847 if (error) {
2848 /* Remove the !PageUptodate pages we added */
2849 if (index > start) {
2850 shmem_undo_range(inode,
2851 (loff_t)start << PAGE_SHIFT,
2852 ((loff_t)index << PAGE_SHIFT) - 1, true);
2853 }
2854 goto undone;
2855 }
2856
2857 /*
2858 * Inform shmem_writepage() how far we have reached.
2859 * No need for lock or barrier: we have the page lock.
2860 */
2861 shmem_falloc.next++;
2862 if (!PageUptodate(page))
2863 shmem_falloc.nr_falloced++;
2864
2865 /*
2866 * If !PageUptodate, leave it that way so that freeable pages
2867 * can be recognized if we need to roll back on error later.
2868 * But set_page_dirty so that memory pressure will swap rather
2869 * than free the pages we are allocating (and SGP_CACHE pages
2870 * might still be clean: we now need to mark those dirty too).
2871 */
2872 set_page_dirty(page);
2873 unlock_page(page);
2874 put_page(page);
2875 cond_resched();
2876 }
2877
2878 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2879 i_size_write(inode, offset + len);
2880 inode->i_ctime = current_time(inode);
2881undone:
2882 spin_lock(&inode->i_lock);
2883 inode->i_private = NULL;
2884 spin_unlock(&inode->i_lock);
2885out:
2886 inode_unlock(inode);
2887 return error;
2888}
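/*
 * Editor's illustration (userspace sketch, not part of this file) of the
 * two fallocate(2) modes handled above:
 *
 *	preallocate 8MiB without changing i_size:
 *		fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 8 << 20);
 *
 *	punch a 2MiB hole (PUNCH_HOLE must be paired with KEEP_SIZE):
 *		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			  1 << 20, 2 << 20);
 *
 * Faults into the range being punched wait on shmem_falloc_waitq (see
 * shmem_fault()) until the punch completes.
 */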
2889
2890static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2891{
2892 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2893
2894 buf->f_type = TMPFS_MAGIC;
2895 buf->f_bsize = PAGE_SIZE;
2896 buf->f_namelen = NAME_MAX;
2897 if (sbinfo->max_blocks) {
2898 buf->f_blocks = sbinfo->max_blocks;
2899 buf->f_bavail =
2900 buf->f_bfree = sbinfo->max_blocks -
2901 percpu_counter_sum(&sbinfo->used_blocks);
2902 }
2903 if (sbinfo->max_inodes) {
2904 buf->f_files = sbinfo->max_inodes;
2905 buf->f_ffree = sbinfo->free_inodes;
2906 }
2907 /* else leave those fields 0 like simple_statfs */
2908 return 0;
2909}
2910
2911/*
2912 * File creation. Allocate an inode, and we're done.
2913 */
2914static int
2915shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2916{
2917 struct inode *inode;
2918 int error = -ENOSPC;
2919
2920 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2921 if (inode) {
2922 error = simple_acl_create(dir, inode);
2923 if (error)
2924 goto out_iput;
2925 error = security_inode_init_security(inode, dir,
2926 &dentry->d_name,
2927 shmem_initxattrs, NULL);
2928 if (error && error != -EOPNOTSUPP)
2929 goto out_iput;
2930
2931 error = 0;
2932 dir->i_size += BOGO_DIRENT_SIZE;
2933 dir->i_ctime = dir->i_mtime = current_time(dir);
2934 d_instantiate(dentry, inode);
2935 dget(dentry); /* Extra count - pin the dentry in core */
2936 }
2937 return error;
2938out_iput:
2939 iput(inode);
2940 return error;
2941}
2942
2943static int
2944shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2945{
2946 struct inode *inode;
2947 int error = -ENOSPC;
2948
2949 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2950 if (inode) {
2951 error = security_inode_init_security(inode, dir,
2952 NULL,
2953 shmem_initxattrs, NULL);
2954 if (error && error != -EOPNOTSUPP)
2955 goto out_iput;
2956 error = simple_acl_create(dir, inode);
2957 if (error)
2958 goto out_iput;
2959 d_tmpfile(dentry, inode);
2960 }
2961 return error;
2962out_iput:
2963 iput(inode);
2964 return error;
2965}
2966
2967static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2968{
2969 int error;
2970
2971 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2972 return error;
2973 inc_nlink(dir);
2974 return 0;
2975}
2976
2977static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2978 bool excl)
2979{
2980 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2981}
2982
2983/*
2984 * Link a file.
2985 */
2986static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2987{
2988 struct inode *inode = d_inode(old_dentry);
2989 int ret = 0;
2990
2991 /*
2992 * No ordinary (disk-based) filesystem counts links as inodes;
2993 * but each new link needs a new dentry, pinning lowmem, and
2994 * tmpfs dentries cannot be pruned until they are unlinked.
2995 * But if an O_TMPFILE file is linked into the tmpfs, the
2996 * first link must skip that, to get the accounting right.
2997 */
2998 if (inode->i_nlink) {
2999 ret = shmem_reserve_inode(inode->i_sb, NULL);
3000 if (ret)
3001 goto out;
3002 }
3003
3004 dir->i_size += BOGO_DIRENT_SIZE;
3005 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3006 inc_nlink(inode);
3007 ihold(inode); /* New dentry reference */
3008 dget(dentry); /* Extra pinning count for the created dentry */
3009 d_instantiate(dentry, inode);
3010out:
3011 return ret;
3012}
3013
3014static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3015{
3016 struct inode *inode = d_inode(dentry);
3017
3018 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3019 shmem_free_inode(inode->i_sb);
3020
3021 dir->i_size -= BOGO_DIRENT_SIZE;
3022 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3023 drop_nlink(inode);
3024 dput(dentry); /* Undo the count from "create" - this does all the work */
3025 return 0;
3026}
3027
3028static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3029{
3030 if (!simple_empty(dentry))
3031 return -ENOTEMPTY;
3032
3033 drop_nlink(d_inode(dentry));
3034 drop_nlink(dir);
3035 return shmem_unlink(dir, dentry);
3036}
3037
3038static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
3039{
3040 bool old_is_dir = d_is_dir(old_dentry);
3041 bool new_is_dir = d_is_dir(new_dentry);
3042
3043 if (old_dir != new_dir && old_is_dir != new_is_dir) {
3044 if (old_is_dir) {
3045 drop_nlink(old_dir);
3046 inc_nlink(new_dir);
3047 } else {
3048 drop_nlink(new_dir);
3049 inc_nlink(old_dir);
3050 }
3051 }
3052 old_dir->i_ctime = old_dir->i_mtime =
3053 new_dir->i_ctime = new_dir->i_mtime =
3054 d_inode(old_dentry)->i_ctime =
3055 d_inode(new_dentry)->i_ctime = current_time(old_dir);
3056
3057 return 0;
3058}
3059
3060static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3061{
3062 struct dentry *whiteout;
3063 int error;
3064
3065 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3066 if (!whiteout)
3067 return -ENOMEM;
3068
3069 error = shmem_mknod(old_dir, whiteout,
3070 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3071 dput(whiteout);
3072 if (error)
3073 return error;
3074
3075 /*
3076 * Cheat and hash the whiteout while the old dentry is still in
3077 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3078 *
3079 * d_lookup() will consistently find one of them at this point,
3080 * not sure which one, but that isn't even important.
3081 */
3082 d_rehash(whiteout);
3083 return 0;
3084}
3085
3086/*
3087 * The VFS layer already does all the dentry stuff for rename;
3088 * we just have to decrement the usage count for the target if
3089 * it exists, so that the VFS layer correctly frees it when it
3090 * gets overwritten.
3091 */
3092static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3093{
3094 struct inode *inode = d_inode(old_dentry);
3095 int they_are_dirs = S_ISDIR(inode->i_mode);
3096
3097 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3098 return -EINVAL;
3099
3100 if (flags & RENAME_EXCHANGE)
3101 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3102
3103 if (!simple_empty(new_dentry))
3104 return -ENOTEMPTY;
3105
3106 if (flags & RENAME_WHITEOUT) {
3107 int error;
3108
3109 error = shmem_whiteout(old_dir, old_dentry);
3110 if (error)
3111 return error;
3112 }
3113
3114 if (d_really_is_positive(new_dentry)) {
3115 (void) shmem_unlink(new_dir, new_dentry);
3116 if (they_are_dirs) {
3117 drop_nlink(d_inode(new_dentry));
3118 drop_nlink(old_dir);
3119 }
3120 } else if (they_are_dirs) {
3121 drop_nlink(old_dir);
3122 inc_nlink(new_dir);
3123 }
3124
3125 old_dir->i_size -= BOGO_DIRENT_SIZE;
3126 new_dir->i_size += BOGO_DIRENT_SIZE;
3127 old_dir->i_ctime = old_dir->i_mtime =
3128 new_dir->i_ctime = new_dir->i_mtime =
3129 inode->i_ctime = current_time(old_dir);
3130 return 0;
3131}
3132
3133static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3134{
3135 int error;
3136 int len;
3137 struct inode *inode;
3138 struct page *page;
3139
3140 len = strlen(symname) + 1;
3141 if (len > PAGE_SIZE)
3142 return -ENAMETOOLONG;
3143
3144 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3145 VM_NORESERVE);
3146 if (!inode)
3147 return -ENOSPC;
3148
3149 error = security_inode_init_security(inode, dir, &dentry->d_name,
3150 shmem_initxattrs, NULL);
3151 if (error && error != -EOPNOTSUPP) {
3152 iput(inode);
3153 return error;
3154 }
3155
3156 inode->i_size = len-1;
3157 if (len <= SHORT_SYMLINK_LEN) {
3158 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3159 if (!inode->i_link) {
3160 iput(inode);
3161 return -ENOMEM;
3162 }
3163 inode->i_op = &shmem_short_symlink_operations;
3164 } else {
3165 inode_nohighmem(inode);
3166 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3167 if (error) {
3168 iput(inode);
3169 return error;
3170 }
3171 inode->i_mapping->a_ops = &shmem_aops;
3172 inode->i_op = &shmem_symlink_inode_operations;
3173 memcpy(page_address(page), symname, len);
3174 SetPageUptodate(page);
3175 set_page_dirty(page);
3176 unlock_page(page);
3177 put_page(page);
3178 }
3179 dir->i_size += BOGO_DIRENT_SIZE;
3180 dir->i_ctime = dir->i_mtime = current_time(dir);
3181 d_instantiate(dentry, inode);
3182 dget(dentry);
3183 return 0;
3184}
3185
3186static void shmem_put_link(void *arg)
3187{
3188 mark_page_accessed(arg);
3189 put_page(arg);
3190}
3191
3192static const char *shmem_get_link(struct dentry *dentry,
3193 struct inode *inode,
3194 struct delayed_call *done)
3195{
3196 struct page *page = NULL;
3197 int error;
3198 if (!dentry) {
3199 page = find_get_page(inode->i_mapping, 0);
3200 if (!page)
3201 return ERR_PTR(-ECHILD);
3202 if (!PageUptodate(page)) {
3203 put_page(page);
3204 return ERR_PTR(-ECHILD);
3205 }
3206 } else {
3207 error = shmem_getpage(inode, 0, &page, SGP_READ);
3208 if (error)
3209 return ERR_PTR(error);
3210 unlock_page(page);
3211 }
3212 set_delayed_call(done, shmem_put_link, page);
3213 return page_address(page);
3214}
3215
3216#ifdef CONFIG_TMPFS_XATTR
3217/*
3218 * Superblocks without xattr inode operations may get some security.* xattr
3219 * support from the LSM "for free". As soon as we have any other xattrs
3220 * like ACLs, we also need to implement the security.* handlers at
3221 * filesystem level, though.
3222 */
3223
3224/*
3225 * Callback for security_inode_init_security() for acquiring xattrs.
3226 */
3227static int shmem_initxattrs(struct inode *inode,
3228 const struct xattr *xattr_array,
3229 void *fs_info)
3230{
3231 struct shmem_inode_info *info = SHMEM_I(inode);
3232 const struct xattr *xattr;
3233 struct simple_xattr *new_xattr;
3234 size_t len;
3235
3236 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3237 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3238 if (!new_xattr)
3239 return -ENOMEM;
3240
3241 len = strlen(xattr->name) + 1;
3242 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3243 GFP_KERNEL);
3244 if (!new_xattr->name) {
3245 kvfree(new_xattr);
3246 return -ENOMEM;
3247 }
3248
3249 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3250 XATTR_SECURITY_PREFIX_LEN);
3251 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3252 xattr->name, len);
3253
3254 simple_xattr_list_add(&info->xattrs, new_xattr);
3255 }
3256
3257 return 0;
3258}
3259
3260static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3261 struct dentry *unused, struct inode *inode,
3262 const char *name, void *buffer, size_t size)
3263{
3264 struct shmem_inode_info *info = SHMEM_I(inode);
3265
3266 name = xattr_full_name(handler, name);
3267 return simple_xattr_get(&info->xattrs, name, buffer, size);
3268}
3269
3270static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3271 struct dentry *unused, struct inode *inode,
3272 const char *name, const void *value,
3273 size_t size, int flags)
3274{
3275 struct shmem_inode_info *info = SHMEM_I(inode);
3276
3277 name = xattr_full_name(handler, name);
3278 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3279}
3280
3281static const struct xattr_handler shmem_security_xattr_handler = {
3282 .prefix = XATTR_SECURITY_PREFIX,
3283 .get = shmem_xattr_handler_get,
3284 .set = shmem_xattr_handler_set,
3285};
3286
3287static const struct xattr_handler shmem_trusted_xattr_handler = {
3288 .prefix = XATTR_TRUSTED_PREFIX,
3289 .get = shmem_xattr_handler_get,
3290 .set = shmem_xattr_handler_set,
3291};
3292
3293static const struct xattr_handler *shmem_xattr_handlers[] = {
3294#ifdef CONFIG_TMPFS_POSIX_ACL
3295 &posix_acl_access_xattr_handler,
3296 &posix_acl_default_xattr_handler,
3297#endif
3298 &shmem_security_xattr_handler,
3299 &shmem_trusted_xattr_handler,
3300 NULL
3301};
3302
3303static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3304{
3305 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3306 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3307}
3308#endif /* CONFIG_TMPFS_XATTR */
3309
3310static const struct inode_operations shmem_short_symlink_operations = {
3311 .get_link = simple_get_link,
3312#ifdef CONFIG_TMPFS_XATTR
3313 .listxattr = shmem_listxattr,
3314#endif
3315};
3316
3317static const struct inode_operations shmem_symlink_inode_operations = {
3318 .get_link = shmem_get_link,
3319#ifdef CONFIG_TMPFS_XATTR
3320 .listxattr = shmem_listxattr,
3321#endif
3322};
3323
3324static struct dentry *shmem_get_parent(struct dentry *child)
3325{
3326 return ERR_PTR(-ESTALE);
3327}
3328
3329static int shmem_match(struct inode *ino, void *vfh)
3330{
3331 __u32 *fh = vfh;
3332 __u64 inum = fh[2];
3333 inum = (inum << 32) | fh[1];
3334 return ino->i_ino == inum && fh[0] == ino->i_generation;
3335}
3336
3337/* Find any alias of inode, but prefer a hashed alias */
3338static struct dentry *shmem_find_alias(struct inode *inode)
3339{
3340 struct dentry *alias = d_find_alias(inode);
3341
3342 return alias ?: d_find_any_alias(inode);
3343}
3344
3345
3346static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3347 struct fid *fid, int fh_len, int fh_type)
3348{
3349 struct inode *inode;
3350 struct dentry *dentry = NULL;
3351 u64 inum;
3352
3353 if (fh_len < 3)
3354 return NULL;
3355
3356 inum = fid->raw[2];
3357 inum = (inum << 32) | fid->raw[1];
3358
3359 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3360 shmem_match, fid->raw);
3361 if (inode) {
3362 dentry = shmem_find_alias(inode);
3363 iput(inode);
3364 }
3365
3366 return dentry;
3367}
3368
3369static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3370 struct inode *parent)
3371{
3372 if (*len < 3) {
3373 *len = 3;
3374 return FILEID_INVALID;
3375 }
3376
3377 if (inode_unhashed(inode)) {
3378 /* Unfortunately insert_inode_hash is not idempotent,
3379 * so as we hash inodes here rather than at creation
3380 * time, we need a lock to ensure we only try
3381 * to do it once.
3382 */
3383 static DEFINE_SPINLOCK(lock);
3384 spin_lock(&lock);
3385 if (inode_unhashed(inode))
3386 __insert_inode_hash(inode,
3387 inode->i_ino + inode->i_generation);
3388 spin_unlock(&lock);
3389 }
3390
3391 fh[0] = inode->i_generation;
3392 fh[1] = inode->i_ino;
3393 fh[2] = ((__u64)inode->i_ino) >> 32;
3394
3395 *len = 3;
3396 return 1;
3397}
3398
3399static const struct export_operations shmem_export_ops = {
3400 .get_parent = shmem_get_parent,
3401 .encode_fh = shmem_encode_fh,
3402 .fh_to_dentry = shmem_fh_to_dentry,
3403};
3404
3405enum shmem_param {
3406 Opt_gid,
3407 Opt_huge,
3408 Opt_mode,
3409 Opt_mpol,
3410 Opt_nr_blocks,
3411 Opt_nr_inodes,
3412 Opt_size,
3413 Opt_uid,
3414 Opt_inode32,
3415 Opt_inode64,
3416};
3417
3418static const struct constant_table shmem_param_enums_huge[] = {
3419 {"never", SHMEM_HUGE_NEVER },
3420 {"always", SHMEM_HUGE_ALWAYS },
3421 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3422 {"advise", SHMEM_HUGE_ADVISE },
3423 {}
3424};
3425
3426const struct fs_parameter_spec shmem_fs_parameters[] = {
3427 fsparam_u32 ("gid", Opt_gid),
3428 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3429 fsparam_u32oct("mode", Opt_mode),
3430 fsparam_string("mpol", Opt_mpol),
3431 fsparam_string("nr_blocks", Opt_nr_blocks),
3432 fsparam_string("nr_inodes", Opt_nr_inodes),
3433 fsparam_string("size", Opt_size),
3434 fsparam_u32 ("uid", Opt_uid),
3435 fsparam_flag ("inode32", Opt_inode32),
3436 fsparam_flag ("inode64", Opt_inode64),
3437 {}
3438};
3439
3440static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3441{
3442 struct shmem_options *ctx = fc->fs_private;
3443 struct fs_parse_result result;
3444 unsigned long long size;
3445 char *rest;
3446 int opt;
3447
3448 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3449 if (opt < 0)
3450 return opt;
3451
3452 switch (opt) {
3453 case Opt_size:
3454 size = memparse(param->string, &rest);
3455 if (*rest == '%') {
3456 size <<= PAGE_SHIFT;
3457 size *= totalram_pages();
3458 do_div(size, 100);
3459 rest++;
3460 }
3461 if (*rest)
3462 goto bad_value;
3463 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3464 ctx->seen |= SHMEM_SEEN_BLOCKS;
3465 break;
3466 case Opt_nr_blocks:
3467 ctx->blocks = memparse(param->string, &rest);
3468 if (*rest)
3469 goto bad_value;
3470 ctx->seen |= SHMEM_SEEN_BLOCKS;
3471 break;
3472 case Opt_nr_inodes:
3473 ctx->inodes = memparse(param->string, &rest);
3474 if (*rest)
3475 goto bad_value;
3476 ctx->seen |= SHMEM_SEEN_INODES;
3477 break;
3478 case Opt_mode:
3479 ctx->mode = result.uint_32 & 07777;
3480 break;
3481 case Opt_uid:
3482 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3483 if (!uid_valid(ctx->uid))
3484 goto bad_value;
3485 break;
3486 case Opt_gid:
3487 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3488 if (!gid_valid(ctx->gid))
3489 goto bad_value;
3490 break;
3491 case Opt_huge:
3492 ctx->huge = result.uint_32;
3493 if (ctx->huge != SHMEM_HUGE_NEVER &&
3494 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3495 has_transparent_hugepage()))
3496 goto unsupported_parameter;
3497 ctx->seen |= SHMEM_SEEN_HUGE;
3498 break;
3499 case Opt_mpol:
3500 if (IS_ENABLED(CONFIG_NUMA)) {
3501 mpol_put(ctx->mpol);
3502 ctx->mpol = NULL;
3503 if (mpol_parse_str(param->string, &ctx->mpol))
3504 goto bad_value;
3505 break;
3506 }
3507 goto unsupported_parameter;
3508 case Opt_inode32:
3509 ctx->full_inums = false;
3510 ctx->seen |= SHMEM_SEEN_INUMS;
3511 break;
3512 case Opt_inode64:
3513 if (sizeof(ino_t) < 8) {
3514 return invalfc(fc,
3515 "Cannot use inode64 with <64bit inums in kernel\n");
3516 }
3517 ctx->full_inums = true;
3518 ctx->seen |= SHMEM_SEEN_INUMS;
3519 break;
3520 }
3521 return 0;
3522
3523unsupported_parameter:
3524 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3525bad_value:
3526 return invalfc(fc, "Bad value for '%s'", param->key);
3527}
3528
3529static int shmem_parse_options(struct fs_context *fc, void *data)
3530{
3531 char *options = data;
3532
3533 if (options) {
3534 int err = security_sb_eat_lsm_opts(options, &fc->security);
3535 if (err)
3536 return err;
3537 }
3538
3539 while (options != NULL) {
3540 char *this_char = options;
3541 for (;;) {
3542 /*
3543 * NUL-terminate this option: unfortunately,
3544 * mount options form a comma-separated list,
3545 * but mpol's nodelist may also contain commas.
3546 */
3547 options = strchr(options, ',');
3548 if (options == NULL)
3549 break;
3550 options++;
3551 if (!isdigit(*options)) {
3552 options[-1] = '\0';
3553 break;
3554 }
3555 }
3556 if (*this_char) {
3557 char *value = strchr(this_char,'=');
3558 size_t len = 0;
3559 int err;
3560
3561 if (value) {
3562 *value++ = '\0';
3563 len = strlen(value);
3564 }
3565 err = vfs_parse_fs_string(fc, this_char, value, len);
3566 if (err < 0)
3567 return err;
3568 }
3569 }
3570 return 0;
3571}
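/*
 * Editor's note: an example of why the isdigit() check above is needed.
 * A mount string such as
 *
 *	size=1G,mpol=bind:0-1,3,huge=always
 *
 * must be split as "size=1G" / "mpol=bind:0-1,3" / "huge=always"; the comma
 * before "3" belongs to mpol's nodelist and is kept only because the
 * character following it is a digit.
 */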
3572
3573/*
3574 * Reconfigure a shmem filesystem.
3575 *
3576 * Note that we disallow change from limited->unlimited blocks/inodes while any
3577 * are in use; but we must separately disallow unlimited->limited, because in
3578 * that case we have no record of how much is already in use.
3579 */
3580static int shmem_reconfigure(struct fs_context *fc)
3581{
3582 struct shmem_options *ctx = fc->fs_private;
3583 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3584 unsigned long inodes;
3585 const char *err;
3586
3587 spin_lock(&sbinfo->stat_lock);
3588 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3589 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3590 if (!sbinfo->max_blocks) {
3591 err = "Cannot retroactively limit size";
3592 goto out;
3593 }
3594 if (percpu_counter_compare(&sbinfo->used_blocks,
3595 ctx->blocks) > 0) {
3596 err = "Too small a size for current use";
3597 goto out;
3598 }
3599 }
3600 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3601 if (!sbinfo->max_inodes) {
3602 err = "Cannot retroactively limit inodes";
3603 goto out;
3604 }
3605 if (ctx->inodes < inodes) {
3606 err = "Too few inodes for current use";
3607 goto out;
3608 }
3609 }
3610
3611 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3612 sbinfo->next_ino > UINT_MAX) {
3613 err = "Current inum too high to switch to 32-bit inums";
3614 goto out;
3615 }
3616
3617 if (ctx->seen & SHMEM_SEEN_HUGE)
3618 sbinfo->huge = ctx->huge;
3619 if (ctx->seen & SHMEM_SEEN_INUMS)
3620 sbinfo->full_inums = ctx->full_inums;
3621 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3622 sbinfo->max_blocks = ctx->blocks;
3623 if (ctx->seen & SHMEM_SEEN_INODES) {
3624 sbinfo->max_inodes = ctx->inodes;
3625 sbinfo->free_inodes = ctx->inodes - inodes;
3626 }
3627
3628 /*
3629 * Preserve previous mempolicy unless mpol remount option was specified.
3630 */
3631 if (ctx->mpol) {
3632 mpol_put(sbinfo->mpol);
3633 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
3634 ctx->mpol = NULL;
3635 }
3636 spin_unlock(&sbinfo->stat_lock);
3637 return 0;
3638out:
3639 spin_unlock(&sbinfo->stat_lock);
3640 return invalfc(fc, "%s", err);
3641}
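/*
 * Editor's illustration (not part of this file): the checks above in shell
 * terms, for a tmpfs currently mounted with "size=1G":
 *
 *	mount -o remount,size=2G   /mnt/tmp	grows the limit
 *	mount -o remount,size=512M /mnt/tmp	fails if more than 512M is
 *						already in use
 *
 * while a tmpfs mounted without any size limit refuses every later attempt
 * to impose one ("Cannot retroactively limit size"), because block usage
 * was never accounted for the unlimited instance.
 */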
3642
3643static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3644{
3645 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3646
3647 if (sbinfo->max_blocks != shmem_default_max_blocks())
3648 seq_printf(seq, ",size=%luk",
3649 sbinfo->max_blocks << (PAGE_SHIFT - 10));
3650 if (sbinfo->max_inodes != shmem_default_max_inodes())
3651 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3652 if (sbinfo->mode != (0777 | S_ISVTX))
3653 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3654 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3655 seq_printf(seq, ",uid=%u",
3656 from_kuid_munged(&init_user_ns, sbinfo->uid));
3657 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3658 seq_printf(seq, ",gid=%u",
3659 from_kgid_munged(&init_user_ns, sbinfo->gid));
3660
3661 /*
3662 * Showing inode{64,32} might be useful even if it's the system default,
3663 * since then people don't have to resort to checking both here and
3664 * /proc/config.gz to confirm 64-bit inums were successfully applied
3665 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3666 *
3667 * We hide it when inode64 isn't the default and we are using 32-bit
3668 * inodes, since that probably just means the feature isn't even under
3669 * consideration.
3670 *
3671 * As such:
3672 *
3673 * +-----------------+-----------------+
3674 * | TMPFS_INODE64=y | TMPFS_INODE64=n |
3675 * +------------------+-----------------+-----------------+
3676 * | full_inums=true | show | show |
3677 * | full_inums=false | show | hide |
3678 * +------------------+-----------------+-----------------+
3679 *
3680 */
3681 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3682 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3683#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3684 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3685 if (sbinfo->huge)
3686 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3687#endif
3688 shmem_show_mpol(seq, sbinfo->mpol);
3689 return 0;
3690}
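/*
 * Editor's illustration (not part of this file): a mount exercising most of
 * the options shown above,
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,huge=within_size,inode64 \
 *		tmpfs /mnt/tmp
 *
 * would later show up in /proc/mounts roughly as
 *
 *	tmpfs /mnt/tmp tmpfs rw,size=<half-of-RAM>k,nr_inodes=1048576,inode64,huge=within_size 0 0
 *
 * with the size printed in kilobytes and the defaults (mode, uid, gid)
 * omitted, as implemented in shmem_show_options() above.
 */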
3691
3692#endif /* CONFIG_TMPFS */
3693
3694static void shmem_put_super(struct super_block *sb)
3695{
3696 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3697
3698 free_percpu(sbinfo->ino_batch);
3699 percpu_counter_destroy(&sbinfo->used_blocks);
3700 mpol_put(sbinfo->mpol);
3701 kfree(sbinfo);
3702 sb->s_fs_info = NULL;
3703}
3704
3705static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3706{
3707 struct shmem_options *ctx = fc->fs_private;
3708 struct inode *inode;
3709 struct shmem_sb_info *sbinfo;
3710 int err = -ENOMEM;
3711
3712 /* Round up to L1_CACHE_BYTES to resist false sharing */
3713 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3714 L1_CACHE_BYTES), GFP_KERNEL);
3715 if (!sbinfo)
3716 return -ENOMEM;
3717
3718 sb->s_fs_info = sbinfo;
3719
3720#ifdef CONFIG_TMPFS
3721 /*
3722 * By default we allow only half of the physical RAM per
3723 * tmpfs instance, limiting inodes to one per page of lowmem;
3724 * but the internal instance is left unlimited.
3725 */
3726 if (!(sb->s_flags & SB_KERNMOUNT)) {
3727 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3728 ctx->blocks = shmem_default_max_blocks();
3729 if (!(ctx->seen & SHMEM_SEEN_INODES))
3730 ctx->inodes = shmem_default_max_inodes();
3731 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3732 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3733 } else {
3734 sb->s_flags |= SB_NOUSER;
3735 }
3736 sb->s_export_op = &shmem_export_ops;
3737 sb->s_flags |= SB_NOSEC;
3738#else
3739 sb->s_flags |= SB_NOUSER;
3740#endif
3741 sbinfo->max_blocks = ctx->blocks;
3742 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3743 if (sb->s_flags & SB_KERNMOUNT) {
3744 sbinfo->ino_batch = alloc_percpu(ino_t);
3745 if (!sbinfo->ino_batch)
3746 goto failed;
3747 }
3748 sbinfo->uid = ctx->uid;
3749 sbinfo->gid = ctx->gid;
3750 sbinfo->full_inums = ctx->full_inums;
3751 sbinfo->mode = ctx->mode;
3752 sbinfo->huge = ctx->huge;
3753 sbinfo->mpol = ctx->mpol;
3754 ctx->mpol = NULL;
3755
3756 spin_lock_init(&sbinfo->stat_lock);
3757 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3758 goto failed;
3759 spin_lock_init(&sbinfo->shrinklist_lock);
3760 INIT_LIST_HEAD(&sbinfo->shrinklist);
3761
3762 sb->s_maxbytes = MAX_LFS_FILESIZE;
3763 sb->s_blocksize = PAGE_SIZE;
3764 sb->s_blocksize_bits = PAGE_SHIFT;
3765 sb->s_magic = TMPFS_MAGIC;
3766 sb->s_op = &shmem_ops;
3767 sb->s_time_gran = 1;
3768#ifdef CONFIG_TMPFS_XATTR
3769 sb->s_xattr = shmem_xattr_handlers;
3770#endif
3771#ifdef CONFIG_TMPFS_POSIX_ACL
3772 sb->s_flags |= SB_POSIXACL;
3773#endif
3774 uuid_gen(&sb->s_uuid);
3775
3776 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3777 if (!inode)
3778 goto failed;
3779 inode->i_uid = sbinfo->uid;
3780 inode->i_gid = sbinfo->gid;
3781 sb->s_root = d_make_root(inode);
3782 if (!sb->s_root)
3783 goto failed;
3784 return 0;
3785
3786failed:
3787 shmem_put_super(sb);
3788 return err;
3789}
3790
3791static int shmem_get_tree(struct fs_context *fc)
3792{
3793 return get_tree_nodev(fc, shmem_fill_super);
3794}
3795
3796static void shmem_free_fc(struct fs_context *fc)
3797{
3798 struct shmem_options *ctx = fc->fs_private;
3799
3800 if (ctx) {
3801 mpol_put(ctx->mpol);
3802 kfree(ctx);
3803 }
3804}
3805
3806static const struct fs_context_operations shmem_fs_context_ops = {
3807 .free = shmem_free_fc,
3808 .get_tree = shmem_get_tree,
3809#ifdef CONFIG_TMPFS
3810 .parse_monolithic = shmem_parse_options,
3811 .parse_param = shmem_parse_one,
3812 .reconfigure = shmem_reconfigure,
3813#endif
3814};
3815
3816static struct kmem_cache *shmem_inode_cachep;
3817
3818static struct inode *shmem_alloc_inode(struct super_block *sb)
3819{
3820 struct shmem_inode_info *info;
3821 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3822 if (!info)
3823 return NULL;
3824 return &info->vfs_inode;
3825}
3826
3827static void shmem_free_in_core_inode(struct inode *inode)
3828{
3829 if (S_ISLNK(inode->i_mode))
3830 kfree(inode->i_link);
3831 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3832}
3833
3834static void shmem_destroy_inode(struct inode *inode)
3835{
3836 if (S_ISREG(inode->i_mode))
3837 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3838}
3839
3840static void shmem_init_inode(void *foo)
3841{
3842 struct shmem_inode_info *info = foo;
3843 inode_init_once(&info->vfs_inode);
3844}
3845
3846static void shmem_init_inodecache(void)
3847{
3848 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3849 sizeof(struct shmem_inode_info),
3850 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3851}
3852
3853static void shmem_destroy_inodecache(void)
3854{
3855 kmem_cache_destroy(shmem_inode_cachep);
3856}
3857
3858static const struct address_space_operations shmem_aops = {
3859 .writepage = shmem_writepage,
3860 .set_page_dirty = __set_page_dirty_no_writeback,
3861#ifdef CONFIG_TMPFS
3862 .write_begin = shmem_write_begin,
3863 .write_end = shmem_write_end,
3864#endif
3865#ifdef CONFIG_MIGRATION
3866 .migratepage = migrate_page,
3867#endif
3868 .error_remove_page = generic_error_remove_page,
3869};
3870
3871static const struct file_operations shmem_file_operations = {
3872 .mmap = shmem_mmap,
3873 .get_unmapped_area = shmem_get_unmapped_area,
3874#ifdef CONFIG_TMPFS
3875 .llseek = shmem_file_llseek,
3876 .read_iter = shmem_file_read_iter,
3877 .write_iter = generic_file_write_iter,
3878 .fsync = noop_fsync,
3879 .splice_read = generic_file_splice_read,
3880 .splice_write = iter_file_splice_write,
3881 .fallocate = shmem_fallocate,
3882#endif
3883};
3884
3885static const struct inode_operations shmem_inode_operations = {
3886 .getattr = shmem_getattr,
3887 .setattr = shmem_setattr,
3888#ifdef CONFIG_TMPFS_XATTR
3889 .listxattr = shmem_listxattr,
3890 .set_acl = simple_set_acl,
3891#endif
3892};
3893
3894static const struct inode_operations shmem_dir_inode_operations = {
3895#ifdef CONFIG_TMPFS
3896 .create = shmem_create,
3897 .lookup = simple_lookup,
3898 .link = shmem_link,
3899 .unlink = shmem_unlink,
3900 .symlink = shmem_symlink,
3901 .mkdir = shmem_mkdir,
3902 .rmdir = shmem_rmdir,
3903 .mknod = shmem_mknod,
3904 .rename = shmem_rename2,
3905 .tmpfile = shmem_tmpfile,
3906#endif
3907#ifdef CONFIG_TMPFS_XATTR
3908 .listxattr = shmem_listxattr,
3909#endif
3910#ifdef CONFIG_TMPFS_POSIX_ACL
3911 .setattr = shmem_setattr,
3912 .set_acl = simple_set_acl,
3913#endif
3914};
3915
3916static const struct inode_operations shmem_special_inode_operations = {
3917#ifdef CONFIG_TMPFS_XATTR
3918 .listxattr = shmem_listxattr,
3919#endif
3920#ifdef CONFIG_TMPFS_POSIX_ACL
3921 .setattr = shmem_setattr,
3922 .set_acl = simple_set_acl,
3923#endif
3924};
3925
3926static const struct super_operations shmem_ops = {
3927 .alloc_inode = shmem_alloc_inode,
3928 .free_inode = shmem_free_in_core_inode,
3929 .destroy_inode = shmem_destroy_inode,
3930#ifdef CONFIG_TMPFS
3931 .statfs = shmem_statfs,
3932 .show_options = shmem_show_options,
3933#endif
3934 .evict_inode = shmem_evict_inode,
3935 .drop_inode = generic_delete_inode,
3936 .put_super = shmem_put_super,
3937#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3938 .nr_cached_objects = shmem_unused_huge_count,
3939 .free_cached_objects = shmem_unused_huge_scan,
3940#endif
3941};
3942
3943static const struct vm_operations_struct shmem_vm_ops = {
3944 .fault = shmem_fault,
3945 .map_pages = filemap_map_pages,
3946#ifdef CONFIG_NUMA
3947 .set_policy = shmem_set_policy,
3948 .get_policy = shmem_get_policy,
3949#endif
3950};
3951
3952int shmem_init_fs_context(struct fs_context *fc)
3953{
3954 struct shmem_options *ctx;
3955
3956 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3957 if (!ctx)
3958 return -ENOMEM;
3959
3960 ctx->mode = 0777 | S_ISVTX;
3961 ctx->uid = current_fsuid();
3962 ctx->gid = current_fsgid();
3963
3964 fc->fs_private = ctx;
3965 fc->ops = &shmem_fs_context_ops;
3966 return 0;
3967}
3968
3969static struct file_system_type shmem_fs_type = {
3970 .owner = THIS_MODULE,
3971 .name = "tmpfs",
3972 .init_fs_context = shmem_init_fs_context,
3973#ifdef CONFIG_TMPFS
3974 .parameters = shmem_fs_parameters,
3975#endif
3976 .kill_sb = kill_litter_super,
3977 .fs_flags = FS_USERNS_MOUNT,
3978};
3979
3980int __init shmem_init(void)
3981{
3982 int error;
3983
3984 shmem_init_inodecache();
3985
3986 error = register_filesystem(&shmem_fs_type);
3987 if (error) {
3988 pr_err("Could not register tmpfs\n");
3989 goto out2;
3990 }
3991
3992 shm_mnt = kern_mount(&shmem_fs_type);
3993 if (IS_ERR(shm_mnt)) {
3994 error = PTR_ERR(shm_mnt);
3995 pr_err("Could not kern_mount tmpfs\n");
3996 goto out1;
3997 }
3998
3999#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4000 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4001 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4002 else
4003 shmem_huge = 0; /* just in case it was patched */
4004#endif
4005 return 0;
4006
4007out1:
4008 unregister_filesystem(&shmem_fs_type);
4009out2:
4010 shmem_destroy_inodecache();
4011 shm_mnt = ERR_PTR(error);
4012 return error;
4013}
4014
4015#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4016static ssize_t shmem_enabled_show(struct kobject *kobj,
4017 struct kobj_attribute *attr, char *buf)
4018{
4019 static const int values[] = {
4020 SHMEM_HUGE_ALWAYS,
4021 SHMEM_HUGE_WITHIN_SIZE,
4022 SHMEM_HUGE_ADVISE,
4023 SHMEM_HUGE_NEVER,
4024 SHMEM_HUGE_DENY,
4025 SHMEM_HUGE_FORCE,
4026 };
4027 int i, count;
4028
4029 for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
4030 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
4031
4032 count += sprintf(buf + count, fmt,
4033 shmem_format_huge(values[i]));
4034 }
4035 buf[count - 1] = '\n';
4036 return count;
4037}
4038
4039static ssize_t shmem_enabled_store(struct kobject *kobj,
4040 struct kobj_attribute *attr, const char *buf, size_t count)
4041{
4042 char tmp[16];
4043 int huge;
4044
4045 if (count + 1 > sizeof(tmp))
4046 return -EINVAL;
4047 memcpy(tmp, buf, count);
4048 tmp[count] = '\0';
4049 if (count && tmp[count - 1] == '\n')
4050 tmp[count - 1] = '\0';
4051
4052 huge = shmem_parse_huge(tmp);
4053 if (huge == -EINVAL)
4054 return -EINVAL;
4055 if (!has_transparent_hugepage() &&
4056 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4057 return -EINVAL;
4058
4059 shmem_huge = huge;
4060 if (shmem_huge > SHMEM_HUGE_DENY)
4061 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4062 return count;
4063}
4064
4065struct kobj_attribute shmem_enabled_attr =
4066 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4067#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
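/*
 * Illustrative note (editor's sketch, not part of the original file):
 * shmem_enabled_attr above is registered from mm/huge_memory.c as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, so the default huge
 * page policy for the internal mount can be changed at runtime, e.g.
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * "deny" and "force" are emergency/testing overrides that apply to all
 * tmpfs mounts regardless of their individual mount options.
 */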
4068
4069#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4070bool shmem_huge_enabled(struct vm_area_struct *vma)
4071{
4072 struct inode *inode = file_inode(vma->vm_file);
4073 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4074 loff_t i_size;
4075 pgoff_t off;
4076
4077 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
4078 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
4079 return false;
4080 if (shmem_huge == SHMEM_HUGE_FORCE)
4081 return true;
4082 if (shmem_huge == SHMEM_HUGE_DENY)
4083 return false;
4084 switch (sbinfo->huge) {
4085 case SHMEM_HUGE_NEVER:
4086 return false;
4087 case SHMEM_HUGE_ALWAYS:
4088 return true;
4089 case SHMEM_HUGE_WITHIN_SIZE:
4090 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4091 i_size = round_up(i_size_read(inode), PAGE_SIZE);
4092 if (i_size >= HPAGE_PMD_SIZE &&
4093 i_size >> PAGE_SHIFT >= off)
4094 return true;
4095 fallthrough;
4096 case SHMEM_HUGE_ADVISE:
4097 /* TODO: implement fadvise() hints */
4098 return (vma->vm_flags & VM_HUGEPAGE);
4099 default:
4100 VM_BUG_ON(1);
4101 return false;
4102 }
4103}
4104#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4105
4106#else /* !CONFIG_SHMEM */
4107
4108/*
4109 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4110 *
4111 * This is intended for small systems where the benefits of the full
4112 * shmem code (swap-backed and resource-limited) are outweighed by
4113 * its complexity. On systems without swap this code should be
4114 * effectively equivalent, but much lighter weight.
4115 */
4116
4117static struct file_system_type shmem_fs_type = {
4118 .name = "tmpfs",
4119 .init_fs_context = ramfs_init_fs_context,
4120 .parameters = ramfs_fs_parameters,
4121 .kill_sb = kill_litter_super,
4122 .fs_flags = FS_USERNS_MOUNT,
4123};
4124
4125int __init shmem_init(void)
4126{
4127 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4128
4129 shm_mnt = kern_mount(&shmem_fs_type);
4130 BUG_ON(IS_ERR(shm_mnt));
4131
4132 return 0;
4133}
4134
4135int shmem_unuse(unsigned int type, bool frontswap,
4136 unsigned long *fs_pages_to_unuse)
4137{
4138 return 0;
4139}
4140
4141int shmem_lock(struct file *file, int lock, struct user_struct *user)
4142{
4143 return 0;
4144}
4145
4146void shmem_unlock_mapping(struct address_space *mapping)
4147{
4148}
4149
4150#ifdef CONFIG_MMU
4151unsigned long shmem_get_unmapped_area(struct file *file,
4152 unsigned long addr, unsigned long len,
4153 unsigned long pgoff, unsigned long flags)
4154{
4155 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4156}
4157#endif
4158
4159void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4160{
4161 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4162}
4163EXPORT_SYMBOL_GPL(shmem_truncate_range);
4164
4165#define shmem_vm_ops generic_file_vm_ops
4166#define shmem_file_operations ramfs_file_operations
4167#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4168#define shmem_acct_size(flags, size) 0
4169#define shmem_unacct_size(flags, size) do {} while (0)
4170
4171#endif /* CONFIG_SHMEM */
4172
4173/* common code */
4174
4175static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4176 unsigned long flags, unsigned int i_flags)
4177{
4178 struct inode *inode;
4179 struct file *res;
4180
4181 if (IS_ERR(mnt))
4182 return ERR_CAST(mnt);
4183
4184 if (size < 0 || size > MAX_LFS_FILESIZE)
4185 return ERR_PTR(-EINVAL);
4186
4187 if (shmem_acct_size(flags, size))
4188 return ERR_PTR(-ENOMEM);
4189
4190 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4191 flags);
4192 if (unlikely(!inode)) {
4193 shmem_unacct_size(flags, size);
4194 return ERR_PTR(-ENOSPC);
4195 }
4196 inode->i_flags |= i_flags;
4197 inode->i_size = size;
4198 clear_nlink(inode); /* It is unlinked */
4199 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4200 if (!IS_ERR(res))
4201 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4202 &shmem_file_operations);
4203 if (IS_ERR(res))
4204 iput(inode);
4205 return res;
4206}
4207
4208/**
4209 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4210 * kernel internal. There will be NO LSM permission checks against the
4211 * underlying inode. So users of this interface must do LSM checks at a
4212 * higher layer. The users are the big_key and shm implementations. LSM
4213 * checks are provided at the key or shm level rather than the inode.
4214 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4215 * @size: size to be set for the file
4216 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4217 */
4218struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4219{
4220 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4221}
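/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a kernel-internal caller such as the SysV shm code named above creates
 * and later releases such a file roughly as below; the name string and
 * "size" are placeholders, and error handling is abbreviated.
 *
 *	struct file *filp;
 *
 *	filp = shmem_kernel_file_setup("SYSV-segment", size, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 *
 * Dropping the last reference with fput() frees the unlinked inode and all
 * of its pages; no LSM inode checks are performed on this S_PRIVATE inode.
 */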
4222
4223/**
4224 * shmem_file_setup - get an unlinked file living in tmpfs
4225 * @name: name for dentry (to be seen in /proc/<pid>/maps
4226 * @size: size to be set for the file
4227 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4228 */
4229struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4230{
4231 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4232}
4233EXPORT_SYMBOL_GPL(shmem_file_setup);
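/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a driver that wants a swappable, unlinked backing object for its buffers
 * might do the following; "my-object" and obj_size are placeholders.
 *
 *	struct file *filp = shmem_file_setup("my-object", obj_size, VM_NORESERVE);
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *
 * Unlike shmem_kernel_file_setup(), normal LSM checks apply to this inode.
 * The file is unlinked, so fput() on the last reference releases everything.
 */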
4234
4235/**
4236 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4237 * @mnt: the tmpfs mount where the file will be created
4238 * @name: name for dentry (to be seen in /proc/<pid>/maps
4239 * @size: size to be set for the file
4240 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4241 */
4242struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4243 loff_t size, unsigned long flags)
4244{
4245 return __shmem_file_setup(mnt, name, size, flags, 0);
4246}
4247EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
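/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * this variant differs from shmem_file_setup() only in letting the caller
 * supply its own tmpfs mount instead of the internal shm_mnt, so the object
 * inherits that mount's options (for example its huge page policy). One
 * plausible pattern, with all names placeholders:
 *
 *	struct file_system_type *type = get_fs_type("tmpfs");
 *	struct vfsmount *mnt = kern_mount(type);
 *	struct file *filp = shmem_file_setup_with_mnt(mnt, "my-object", size, 0);
 */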
4248
4249/**
4250 * shmem_zero_setup - setup a shared anonymous mapping
4251 * @vma: the vma to be mmapped, as prepared by do_mmap
4252 */
4253int shmem_zero_setup(struct vm_area_struct *vma)
4254{
4255 struct file *file;
4256 loff_t size = vma->vm_end - vma->vm_start;
4257
4258 /*
4259 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4260 * between XFS directory reading and selinux: since this file is only
4261 * accessible to the user through its mapping, use S_PRIVATE flag to
4262 * bypass file security, in the same way as shmem_kernel_file_setup().
4263 */
4264 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4265 if (IS_ERR(file))
4266 return PTR_ERR(file);
4267
4268 if (vma->vm_file)
4269 fput(vma->vm_file);
4270 vma->vm_file = file;
4271 vma->vm_ops = &shmem_vm_ops;
4272
4273 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4274 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4275 (vma->vm_end & HPAGE_PMD_MASK)) {
4276 khugepaged_enter(vma, vma->vm_flags);
4277 }
4278
4279 return 0;
4280}
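/*
 * For reference (userspace view; editor's illustration, not part of the
 * original file): a shared anonymous mapping such as
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * is what mmap_region() turns into a call to shmem_zero_setup(), so the
 * pages behind p live on the internal tmpfs mount and may be swapped out.
 */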
4281
4282/**
4283 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4284 * @mapping: the page's address_space
4285 * @index: the page index
4286 * @gfp: the page allocator flags to use if allocating
4287 *
4288 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4289 * with any new page allocations done using the specified allocation flags.
4290 * But read_cache_page_gfp() uses the ->readpage() method: which does not
4291 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4292 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4293 *
4294 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4295 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4296 */
4297struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4298 pgoff_t index, gfp_t gfp)
4299{
4300#ifdef CONFIG_SHMEM
4301 struct inode *inode = mapping->host;
4302 struct page *page;
4303 int error;
4304
4305 BUG_ON(mapping->a_ops != &shmem_aops);
4306 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4307 gfp, NULL, NULL, NULL);
4308 if (error)
4309 page = ERR_PTR(error);
4310 else
4311 unlock_page(page);
4312 return page;
4313#else
4314 /*
4315 * The tiny !SHMEM case uses ramfs without swap
4316 */
4317 return read_cache_page_gfp(mapping, index, gfp);
4318#endif
4319}
4320EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
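/*
 * Illustrative sketch of the call pattern described above (simplified,
 * editor's addition, not the actual i915 code): mix the relaxing flags into
 * the mapping's gfp mask, then handle failure however the caller prefers.
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */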
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/ramfs.h>
29#include <linux/pagemap.h>
30#include <linux/file.h>
31#include <linux/mm.h>
32#include <linux/export.h>
33#include <linux/swap.h>
34#include <linux/uio.h>
35
36static struct vfsmount *shm_mnt;
37
38#ifdef CONFIG_SHMEM
39/*
40 * This virtual memory filesystem is heavily based on the ramfs. It
41 * extends ramfs by the ability to use swap and honor resource limits
42 * which makes it a completely usable filesystem.
43 */
44
45#include <linux/xattr.h>
46#include <linux/exportfs.h>
47#include <linux/posix_acl.h>
48#include <linux/posix_acl_xattr.h>
49#include <linux/mman.h>
50#include <linux/string.h>
51#include <linux/slab.h>
52#include <linux/backing-dev.h>
53#include <linux/shmem_fs.h>
54#include <linux/writeback.h>
55#include <linux/blkdev.h>
56#include <linux/pagevec.h>
57#include <linux/percpu_counter.h>
58#include <linux/falloc.h>
59#include <linux/splice.h>
60#include <linux/security.h>
61#include <linux/swapops.h>
62#include <linux/mempolicy.h>
63#include <linux/namei.h>
64#include <linux/ctype.h>
65#include <linux/migrate.h>
66#include <linux/highmem.h>
67#include <linux/seq_file.h>
68#include <linux/magic.h>
69#include <linux/syscalls.h>
70#include <linux/fcntl.h>
71#include <uapi/linux/memfd.h>
72
73#include <asm/uaccess.h>
74#include <asm/pgtable.h>
75
76#include "internal.h"
77
78#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
79#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
80
81/* Pretend that each entry is of this size in directory's i_size */
82#define BOGO_DIRENT_SIZE 20
83
84/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
85#define SHORT_SYMLINK_LEN 128
86
87/*
88 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
89 * inode->i_private (with i_mutex making sure that it has only one user at
90 * a time): we would prefer not to enlarge the shmem inode just for that.
91 */
92struct shmem_falloc {
93 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
94 pgoff_t start; /* start of range currently being fallocated */
95 pgoff_t next; /* the next page offset to be fallocated */
96 pgoff_t nr_falloced; /* how many new pages have been fallocated */
97 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
98};
99
100/* Flag allocation requirements to shmem_getpage */
101enum sgp_type {
102 SGP_READ, /* don't exceed i_size, don't allocate page */
103 SGP_CACHE, /* don't exceed i_size, may allocate page */
104 SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
105 SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
106 SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
107};
108
109#ifdef CONFIG_TMPFS
110static unsigned long shmem_default_max_blocks(void)
111{
112 return totalram_pages / 2;
113}
114
115static unsigned long shmem_default_max_inodes(void)
116{
117 return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
118}
119#endif
120
121static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
122static int shmem_replace_page(struct page **pagep, gfp_t gfp,
123 struct shmem_inode_info *info, pgoff_t index);
124static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
125 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
126
127static inline int shmem_getpage(struct inode *inode, pgoff_t index,
128 struct page **pagep, enum sgp_type sgp, int *fault_type)
129{
130 return shmem_getpage_gfp(inode, index, pagep, sgp,
131 mapping_gfp_mask(inode->i_mapping), fault_type);
132}
133
134static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
135{
136 return sb->s_fs_info;
137}
138
139/*
140 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
141 * for shared memory and for shared anonymous (/dev/zero) mappings
142 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
143 * consistent with the pre-accounting of private mappings ...
144 */
145static inline int shmem_acct_size(unsigned long flags, loff_t size)
146{
147 return (flags & VM_NORESERVE) ?
148 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
149}
150
151static inline void shmem_unacct_size(unsigned long flags, loff_t size)
152{
153 if (!(flags & VM_NORESERVE))
154 vm_unacct_memory(VM_ACCT(size));
155}
156
157static inline int shmem_reacct_size(unsigned long flags,
158 loff_t oldsize, loff_t newsize)
159{
160 if (!(flags & VM_NORESERVE)) {
161 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
162 return security_vm_enough_memory_mm(current->mm,
163 VM_ACCT(newsize) - VM_ACCT(oldsize));
164 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
165 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
166 }
167 return 0;
168}
169
170/*
171 * ... whereas tmpfs objects are accounted incrementally as
172 * pages are allocated, in order to allow huge sparse files.
173 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
174 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
175 */
176static inline int shmem_acct_block(unsigned long flags)
177{
178 return (flags & VM_NORESERVE) ?
179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
180}
181
182static inline void shmem_unacct_blocks(unsigned long flags, long pages)
183{
184 if (flags & VM_NORESERVE)
185 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
186}
187
188static const struct super_operations shmem_ops;
189static const struct address_space_operations shmem_aops;
190static const struct file_operations shmem_file_operations;
191static const struct inode_operations shmem_inode_operations;
192static const struct inode_operations shmem_dir_inode_operations;
193static const struct inode_operations shmem_special_inode_operations;
194static const struct vm_operations_struct shmem_vm_ops;
195
196static LIST_HEAD(shmem_swaplist);
197static DEFINE_MUTEX(shmem_swaplist_mutex);
198
199static int shmem_reserve_inode(struct super_block *sb)
200{
201 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
202 if (sbinfo->max_inodes) {
203 spin_lock(&sbinfo->stat_lock);
204 if (!sbinfo->free_inodes) {
205 spin_unlock(&sbinfo->stat_lock);
206 return -ENOSPC;
207 }
208 sbinfo->free_inodes--;
209 spin_unlock(&sbinfo->stat_lock);
210 }
211 return 0;
212}
213
214static void shmem_free_inode(struct super_block *sb)
215{
216 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
217 if (sbinfo->max_inodes) {
218 spin_lock(&sbinfo->stat_lock);
219 sbinfo->free_inodes++;
220 spin_unlock(&sbinfo->stat_lock);
221 }
222}
223
224/**
225 * shmem_recalc_inode - recalculate the block usage of an inode
226 * @inode: inode to recalc
227 *
228 * We have to calculate the free blocks since the mm can drop
229 * undirtied hole pages behind our back.
230 *
231 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
232 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
233 *
234 * It has to be called with the spinlock held.
235 */
236static void shmem_recalc_inode(struct inode *inode)
237{
238 struct shmem_inode_info *info = SHMEM_I(inode);
239 long freed;
240
241 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
242 if (freed > 0) {
243 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
244 if (sbinfo->max_blocks)
245 percpu_counter_add(&sbinfo->used_blocks, -freed);
246 info->alloced -= freed;
247 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
248 shmem_unacct_blocks(info->flags, freed);
249 }
250}
251
252/*
253 * Replace item expected in radix tree by a new item, while holding tree lock.
254 */
255static int shmem_radix_tree_replace(struct address_space *mapping,
256 pgoff_t index, void *expected, void *replacement)
257{
258 void **pslot;
259 void *item;
260
261 VM_BUG_ON(!expected);
262 VM_BUG_ON(!replacement);
263 pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
264 if (!pslot)
265 return -ENOENT;
266 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
267 if (item != expected)
268 return -ENOENT;
269 radix_tree_replace_slot(pslot, replacement);
270 return 0;
271}
272
273/*
274 * Sometimes, before we decide whether to proceed or to fail, we must check
275 * that an entry was not already brought back from swap by a racing thread.
276 *
277 * Checking page is not enough: by the time a SwapCache page is locked, it
278 * might be reused, and again be SwapCache, using the same swap as before.
279 */
280static bool shmem_confirm_swap(struct address_space *mapping,
281 pgoff_t index, swp_entry_t swap)
282{
283 void *item;
284
285 rcu_read_lock();
286 item = radix_tree_lookup(&mapping->page_tree, index);
287 rcu_read_unlock();
288 return item == swp_to_radix_entry(swap);
289}
290
291/*
292 * Like add_to_page_cache_locked, but error if expected item has gone.
293 */
294static int shmem_add_to_page_cache(struct page *page,
295 struct address_space *mapping,
296 pgoff_t index, void *expected)
297{
298 int error;
299
300 VM_BUG_ON_PAGE(!PageLocked(page), page);
301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
302
303 get_page(page);
304 page->mapping = mapping;
305 page->index = index;
306
307 spin_lock_irq(&mapping->tree_lock);
308 if (!expected)
309 error = radix_tree_insert(&mapping->page_tree, index, page);
310 else
311 error = shmem_radix_tree_replace(mapping, index, expected,
312 page);
313 if (!error) {
314 mapping->nrpages++;
315 __inc_zone_page_state(page, NR_FILE_PAGES);
316 __inc_zone_page_state(page, NR_SHMEM);
317 spin_unlock_irq(&mapping->tree_lock);
318 } else {
319 page->mapping = NULL;
320 spin_unlock_irq(&mapping->tree_lock);
321 put_page(page);
322 }
323 return error;
324}
325
326/*
327 * Like delete_from_page_cache, but substitutes swap for page.
328 */
329static void shmem_delete_from_page_cache(struct page *page, void *radswap)
330{
331 struct address_space *mapping = page->mapping;
332 int error;
333
334 spin_lock_irq(&mapping->tree_lock);
335 error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
336 page->mapping = NULL;
337 mapping->nrpages--;
338 __dec_zone_page_state(page, NR_FILE_PAGES);
339 __dec_zone_page_state(page, NR_SHMEM);
340 spin_unlock_irq(&mapping->tree_lock);
341 put_page(page);
342 BUG_ON(error);
343}
344
345/*
346 * Remove swap entry from radix tree, free the swap and its page cache.
347 */
348static int shmem_free_swap(struct address_space *mapping,
349 pgoff_t index, void *radswap)
350{
351 void *old;
352
353 spin_lock_irq(&mapping->tree_lock);
354 old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
355 spin_unlock_irq(&mapping->tree_lock);
356 if (old != radswap)
357 return -ENOENT;
358 free_swap_and_cache(radix_to_swp_entry(radswap));
359 return 0;
360}
361
362/*
363 * Determine (in bytes) how many of the shmem object's pages mapped by the
364 * given offsets are swapped out.
365 *
366 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
367 * as long as the inode doesn't go away and racy results are not a problem.
368 */
369unsigned long shmem_partial_swap_usage(struct address_space *mapping,
370 pgoff_t start, pgoff_t end)
371{
372 struct radix_tree_iter iter;
373 void **slot;
374 struct page *page;
375 unsigned long swapped = 0;
376
377 rcu_read_lock();
378
379 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
380 if (iter.index >= end)
381 break;
382
383 page = radix_tree_deref_slot(slot);
384
385 if (radix_tree_deref_retry(page)) {
386 slot = radix_tree_iter_retry(&iter);
387 continue;
388 }
389
390 if (radix_tree_exceptional_entry(page))
391 swapped++;
392
393 if (need_resched()) {
394 cond_resched_rcu();
395 slot = radix_tree_iter_next(&iter);
396 }
397 }
398
399 rcu_read_unlock();
400
401 return swapped << PAGE_SHIFT;
402}
403
404/*
405 * Determine (in bytes) how many of the shmem object's pages mapped by the
406 * given vma is swapped out.
407 *
408 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
409 * as long as the inode doesn't go away and racy results are not a problem.
410 */
411unsigned long shmem_swap_usage(struct vm_area_struct *vma)
412{
413 struct inode *inode = file_inode(vma->vm_file);
414 struct shmem_inode_info *info = SHMEM_I(inode);
415 struct address_space *mapping = inode->i_mapping;
416 unsigned long swapped;
417
418 /* Be careful as we don't hold info->lock */
419 swapped = READ_ONCE(info->swapped);
420
421 /*
422 * The easier cases are when the shmem object has nothing in swap, or
423 * the vma maps it whole. Then we can simply use the stats that we
424 * already track.
425 */
426 if (!swapped)
427 return 0;
428
429 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
430 return swapped << PAGE_SHIFT;
431
432 /* Here comes the more involved part */
433 return shmem_partial_swap_usage(mapping,
434 linear_page_index(vma, vma->vm_start),
435 linear_page_index(vma, vma->vm_end));
436}
437
438/*
439 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
440 */
441void shmem_unlock_mapping(struct address_space *mapping)
442{
443 struct pagevec pvec;
444 pgoff_t indices[PAGEVEC_SIZE];
445 pgoff_t index = 0;
446
447 pagevec_init(&pvec, 0);
448 /*
449 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
450 */
451 while (!mapping_unevictable(mapping)) {
452 /*
453 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
454 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
455 */
456 pvec.nr = find_get_entries(mapping, index,
457 PAGEVEC_SIZE, pvec.pages, indices);
458 if (!pvec.nr)
459 break;
460 index = indices[pvec.nr - 1] + 1;
461 pagevec_remove_exceptionals(&pvec);
462 check_move_unevictable_pages(pvec.pages, pvec.nr);
463 pagevec_release(&pvec);
464 cond_resched();
465 }
466}
467
468/*
469 * Remove range of pages and swap entries from radix tree, and free them.
470 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
471 */
472static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
473 bool unfalloc)
474{
475 struct address_space *mapping = inode->i_mapping;
476 struct shmem_inode_info *info = SHMEM_I(inode);
477 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
478 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
479 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
480 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
481 struct pagevec pvec;
482 pgoff_t indices[PAGEVEC_SIZE];
483 long nr_swaps_freed = 0;
484 pgoff_t index;
485 int i;
486
487 if (lend == -1)
488 end = -1; /* unsigned, so actually very big */
489
490 pagevec_init(&pvec, 0);
491 index = start;
492 while (index < end) {
493 pvec.nr = find_get_entries(mapping, index,
494 min(end - index, (pgoff_t)PAGEVEC_SIZE),
495 pvec.pages, indices);
496 if (!pvec.nr)
497 break;
498 for (i = 0; i < pagevec_count(&pvec); i++) {
499 struct page *page = pvec.pages[i];
500
501 index = indices[i];
502 if (index >= end)
503 break;
504
505 if (radix_tree_exceptional_entry(page)) {
506 if (unfalloc)
507 continue;
508 nr_swaps_freed += !shmem_free_swap(mapping,
509 index, page);
510 continue;
511 }
512
513 if (!trylock_page(page))
514 continue;
515 if (!unfalloc || !PageUptodate(page)) {
516 if (page->mapping == mapping) {
517 VM_BUG_ON_PAGE(PageWriteback(page), page);
518 truncate_inode_page(mapping, page);
519 }
520 }
521 unlock_page(page);
522 }
523 pagevec_remove_exceptionals(&pvec);
524 pagevec_release(&pvec);
525 cond_resched();
526 index++;
527 }
528
529 if (partial_start) {
530 struct page *page = NULL;
531 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
532 if (page) {
533 unsigned int top = PAGE_SIZE;
534 if (start > end) {
535 top = partial_end;
536 partial_end = 0;
537 }
538 zero_user_segment(page, partial_start, top);
539 set_page_dirty(page);
540 unlock_page(page);
541 put_page(page);
542 }
543 }
544 if (partial_end) {
545 struct page *page = NULL;
546 shmem_getpage(inode, end, &page, SGP_READ, NULL);
547 if (page) {
548 zero_user_segment(page, 0, partial_end);
549 set_page_dirty(page);
550 unlock_page(page);
551 put_page(page);
552 }
553 }
554 if (start >= end)
555 return;
556
557 index = start;
558 while (index < end) {
559 cond_resched();
560
561 pvec.nr = find_get_entries(mapping, index,
562 min(end - index, (pgoff_t)PAGEVEC_SIZE),
563 pvec.pages, indices);
564 if (!pvec.nr) {
565 /* If all gone or hole-punch or unfalloc, we're done */
566 if (index == start || end != -1)
567 break;
568 /* But if truncating, restart to make sure all gone */
569 index = start;
570 continue;
571 }
572 for (i = 0; i < pagevec_count(&pvec); i++) {
573 struct page *page = pvec.pages[i];
574
575 index = indices[i];
576 if (index >= end)
577 break;
578
579 if (radix_tree_exceptional_entry(page)) {
580 if (unfalloc)
581 continue;
582 if (shmem_free_swap(mapping, index, page)) {
583 /* Swap was replaced by page: retry */
584 index--;
585 break;
586 }
587 nr_swaps_freed++;
588 continue;
589 }
590
591 lock_page(page);
592 if (!unfalloc || !PageUptodate(page)) {
593 if (page->mapping == mapping) {
594 VM_BUG_ON_PAGE(PageWriteback(page), page);
595 truncate_inode_page(mapping, page);
596 } else {
597 /* Page was replaced by swap: retry */
598 unlock_page(page);
599 index--;
600 break;
601 }
602 }
603 unlock_page(page);
604 }
605 pagevec_remove_exceptionals(&pvec);
606 pagevec_release(&pvec);
607 index++;
608 }
609
610 spin_lock(&info->lock);
611 info->swapped -= nr_swaps_freed;
612 shmem_recalc_inode(inode);
613 spin_unlock(&info->lock);
614}
615
616void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
617{
618 shmem_undo_range(inode, lstart, lend, false);
619 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
620}
621EXPORT_SYMBOL_GPL(shmem_truncate_range);
622
623static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
624 struct kstat *stat)
625{
626 struct inode *inode = dentry->d_inode;
627 struct shmem_inode_info *info = SHMEM_I(inode);
628
629 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
630 spin_lock(&info->lock);
631 shmem_recalc_inode(inode);
632 spin_unlock(&info->lock);
633 }
634 generic_fillattr(inode, stat);
635 return 0;
636}
637
638static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
639{
640 struct inode *inode = d_inode(dentry);
641 struct shmem_inode_info *info = SHMEM_I(inode);
642 int error;
643
644 error = inode_change_ok(inode, attr);
645 if (error)
646 return error;
647
648 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
649 loff_t oldsize = inode->i_size;
650 loff_t newsize = attr->ia_size;
651
652 /* protected by i_mutex */
653 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
654 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
655 return -EPERM;
656
657 if (newsize != oldsize) {
658 error = shmem_reacct_size(SHMEM_I(inode)->flags,
659 oldsize, newsize);
660 if (error)
661 return error;
662 i_size_write(inode, newsize);
663 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
664 }
665 if (newsize <= oldsize) {
666 loff_t holebegin = round_up(newsize, PAGE_SIZE);
667 if (oldsize > holebegin)
668 unmap_mapping_range(inode->i_mapping,
669 holebegin, 0, 1);
670 if (info->alloced)
671 shmem_truncate_range(inode,
672 newsize, (loff_t)-1);
673 /* unmap again to remove racily COWed private pages */
674 if (oldsize > holebegin)
675 unmap_mapping_range(inode->i_mapping,
676 holebegin, 0, 1);
677 }
678 }
679
680 setattr_copy(inode, attr);
681 if (attr->ia_valid & ATTR_MODE)
682 error = posix_acl_chmod(inode, inode->i_mode);
683 return error;
684}
685
686static void shmem_evict_inode(struct inode *inode)
687{
688 struct shmem_inode_info *info = SHMEM_I(inode);
689
690 if (inode->i_mapping->a_ops == &shmem_aops) {
691 shmem_unacct_size(info->flags, inode->i_size);
692 inode->i_size = 0;
693 shmem_truncate_range(inode, 0, (loff_t)-1);
694 if (!list_empty(&info->swaplist)) {
695 mutex_lock(&shmem_swaplist_mutex);
696 list_del_init(&info->swaplist);
697 mutex_unlock(&shmem_swaplist_mutex);
698 }
699 }
700
701 simple_xattrs_free(&info->xattrs);
702 WARN_ON(inode->i_blocks);
703 shmem_free_inode(inode->i_sb);
704 clear_inode(inode);
705}
706
707/*
708 * If swap found in inode, free it and move page from swapcache to filecache.
709 */
710static int shmem_unuse_inode(struct shmem_inode_info *info,
711 swp_entry_t swap, struct page **pagep)
712{
713 struct address_space *mapping = info->vfs_inode.i_mapping;
714 void *radswap;
715 pgoff_t index;
716 gfp_t gfp;
717 int error = 0;
718
719 radswap = swp_to_radix_entry(swap);
720 index = radix_tree_locate_item(&mapping->page_tree, radswap);
721 if (index == -1)
722 return -EAGAIN; /* tell shmem_unuse we found nothing */
723
724 /*
725 * Move _head_ to start search for next from here.
726 * But be careful: shmem_evict_inode checks list_empty without taking
727 * mutex, and there's an instant in list_move_tail when info->swaplist
728 * would appear empty, if it were the only one on shmem_swaplist.
729 */
730 if (shmem_swaplist.next != &info->swaplist)
731 list_move_tail(&shmem_swaplist, &info->swaplist);
732
733 gfp = mapping_gfp_mask(mapping);
734 if (shmem_should_replace_page(*pagep, gfp)) {
735 mutex_unlock(&shmem_swaplist_mutex);
736 error = shmem_replace_page(pagep, gfp, info, index);
737 mutex_lock(&shmem_swaplist_mutex);
738 /*
739 * We needed to drop mutex to make that restrictive page
740 * allocation, but the inode might have been freed while we
741 * dropped it: although a racing shmem_evict_inode() cannot
742 * complete without emptying the radix_tree, our page lock
743 * on this swapcache page is not enough to prevent that -
744 * free_swap_and_cache() of our swap entry will only
745 * trylock_page(), removing swap from radix_tree whatever.
746 *
747 * We must not proceed to shmem_add_to_page_cache() if the
748 * inode has been freed, but of course we cannot rely on
749 * inode or mapping or info to check that. However, we can
750 * safely check if our swap entry is still in use (and here
751 * it can't have got reused for another page): if it's still
752 * in use, then the inode cannot have been freed yet, and we
753 * can safely proceed (if it's no longer in use, that tells
754 * nothing about the inode, but we don't need to unuse swap).
755 */
756 if (!page_swapcount(*pagep))
757 error = -ENOENT;
758 }
759
760 /*
761 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
762 * but also to hold up shmem_evict_inode(): so inode cannot be freed
763 * beneath us (pagelock doesn't help until the page is in pagecache).
764 */
765 if (!error)
766 error = shmem_add_to_page_cache(*pagep, mapping, index,
767 radswap);
768 if (error != -ENOMEM) {
769 /*
770 * Truncation and eviction use free_swap_and_cache(), which
771 * only does trylock page: if we raced, best clean up here.
772 */
773 delete_from_swap_cache(*pagep);
774 set_page_dirty(*pagep);
775 if (!error) {
776 spin_lock(&info->lock);
777 info->swapped--;
778 spin_unlock(&info->lock);
779 swap_free(swap);
780 }
781 }
782 return error;
783}
784
785/*
786 * Search through swapped inodes to find and replace swap by page.
787 */
788int shmem_unuse(swp_entry_t swap, struct page *page)
789{
790 struct list_head *this, *next;
791 struct shmem_inode_info *info;
792 struct mem_cgroup *memcg;
793 int error = 0;
794
795 /*
796 * There's a faint possibility that swap page was replaced before
797 * caller locked it: caller will come back later with the right page.
798 */
799 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
800 goto out;
801
802 /*
803 * Charge page using GFP_KERNEL while we can wait, before taking
804 * the shmem_swaplist_mutex which might hold up shmem_writepage().
805 * Charged back to the user (not to caller) when swap account is used.
806 */
807 error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
808 false);
809 if (error)
810 goto out;
811 /* No radix_tree_preload: swap entry keeps a place for page in tree */
812 error = -EAGAIN;
813
814 mutex_lock(&shmem_swaplist_mutex);
815 list_for_each_safe(this, next, &shmem_swaplist) {
816 info = list_entry(this, struct shmem_inode_info, swaplist);
817 if (info->swapped)
818 error = shmem_unuse_inode(info, swap, &page);
819 else
820 list_del_init(&info->swaplist);
821 cond_resched();
822 if (error != -EAGAIN)
823 break;
824 /* found nothing in this: move on to search the next */
825 }
826 mutex_unlock(&shmem_swaplist_mutex);
827
828 if (error) {
829 if (error != -ENOMEM)
830 error = 0;
831 mem_cgroup_cancel_charge(page, memcg, false);
832 } else
833 mem_cgroup_commit_charge(page, memcg, true, false);
834out:
835 unlock_page(page);
836 put_page(page);
837 return error;
838}
839
840/*
841 * Move the page from the page cache to the swap cache.
842 */
843static int shmem_writepage(struct page *page, struct writeback_control *wbc)
844{
845 struct shmem_inode_info *info;
846 struct address_space *mapping;
847 struct inode *inode;
848 swp_entry_t swap;
849 pgoff_t index;
850
851 BUG_ON(!PageLocked(page));
852 mapping = page->mapping;
853 index = page->index;
854 inode = mapping->host;
855 info = SHMEM_I(inode);
856 if (info->flags & VM_LOCKED)
857 goto redirty;
858 if (!total_swap_pages)
859 goto redirty;
860
861 /*
862 * Our capabilities prevent regular writeback or sync from ever calling
863 * shmem_writepage; but a stacking filesystem might use ->writepage of
864 * its underlying filesystem, in which case tmpfs should write out to
865 * swap only in response to memory pressure, and not for the writeback
866 * threads or sync.
867 */
868 if (!wbc->for_reclaim) {
869 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
870 goto redirty;
871 }
872
873 /*
874 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
875 * value into swapfile.c, the only way we can correctly account for a
876 * fallocated page arriving here is now to initialize it and write it.
877 *
878 * That's okay for a page already fallocated earlier, but if we have
879 * not yet completed the fallocation, then (a) we want to keep track
880 * of this page in case we have to undo it, and (b) it may not be a
881 * good idea to continue anyway, once we're pushing into swap. So
882 * reactivate the page, and let shmem_fallocate() quit when too many.
883 */
884 if (!PageUptodate(page)) {
885 if (inode->i_private) {
886 struct shmem_falloc *shmem_falloc;
887 spin_lock(&inode->i_lock);
888 shmem_falloc = inode->i_private;
889 if (shmem_falloc &&
890 !shmem_falloc->waitq &&
891 index >= shmem_falloc->start &&
892 index < shmem_falloc->next)
893 shmem_falloc->nr_unswapped++;
894 else
895 shmem_falloc = NULL;
896 spin_unlock(&inode->i_lock);
897 if (shmem_falloc)
898 goto redirty;
899 }
900 clear_highpage(page);
901 flush_dcache_page(page);
902 SetPageUptodate(page);
903 }
904
905 swap = get_swap_page();
906 if (!swap.val)
907 goto redirty;
908
909 if (mem_cgroup_try_charge_swap(page, swap))
910 goto free_swap;
911
912 /*
913 * Add inode to shmem_unuse()'s list of swapped-out inodes,
914 * if it's not already there. Do it now before the page is
915 * moved to swap cache, when its pagelock no longer protects
916 * the inode from eviction. But don't unlock the mutex until
917 * we've incremented swapped, because shmem_unuse_inode() will
918 * prune a !swapped inode from the swaplist under this mutex.
919 */
920 mutex_lock(&shmem_swaplist_mutex);
921 if (list_empty(&info->swaplist))
922 list_add_tail(&info->swaplist, &shmem_swaplist);
923
924 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
925 spin_lock(&info->lock);
926 shmem_recalc_inode(inode);
927 info->swapped++;
928 spin_unlock(&info->lock);
929
930 swap_shmem_alloc(swap);
931 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
932
933 mutex_unlock(&shmem_swaplist_mutex);
934 BUG_ON(page_mapped(page));
935 swap_writepage(page, wbc);
936 return 0;
937 }
938
939 mutex_unlock(&shmem_swaplist_mutex);
940free_swap:
941 swapcache_free(swap);
942redirty:
943 set_page_dirty(page);
944 if (wbc->for_reclaim)
945 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
946 unlock_page(page);
947 return 0;
948}
949
950#ifdef CONFIG_NUMA
951#ifdef CONFIG_TMPFS
952static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
953{
954 char buffer[64];
955
956 if (!mpol || mpol->mode == MPOL_DEFAULT)
957 return; /* show nothing */
958
959 mpol_to_str(buffer, sizeof(buffer), mpol);
960
961 seq_printf(seq, ",mpol=%s", buffer);
962}
963
964static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
965{
966 struct mempolicy *mpol = NULL;
967 if (sbinfo->mpol) {
968 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
969 mpol = sbinfo->mpol;
970 mpol_get(mpol);
971 spin_unlock(&sbinfo->stat_lock);
972 }
973 return mpol;
974}
975#endif /* CONFIG_TMPFS */
976
977static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
978 struct shmem_inode_info *info, pgoff_t index)
979{
980 struct vm_area_struct pvma;
981 struct page *page;
982
983 /* Create a pseudo vma that just contains the policy */
984 pvma.vm_start = 0;
985 /* Bias interleave by inode number to distribute better across nodes */
986 pvma.vm_pgoff = index + info->vfs_inode.i_ino;
987 pvma.vm_ops = NULL;
988 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
989
990 page = swapin_readahead(swap, gfp, &pvma, 0);
991
992 /* Drop reference taken by mpol_shared_policy_lookup() */
993 mpol_cond_put(pvma.vm_policy);
994
995 return page;
996}
997
998static struct page *shmem_alloc_page(gfp_t gfp,
999 struct shmem_inode_info *info, pgoff_t index)
1000{
1001 struct vm_area_struct pvma;
1002 struct page *page;
1003
1004 /* Create a pseudo vma that just contains the policy */
1005 pvma.vm_start = 0;
1006 /* Bias interleave by inode number to distribute better across nodes */
1007 pvma.vm_pgoff = index + info->vfs_inode.i_ino;
1008 pvma.vm_ops = NULL;
1009 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1010
1011 page = alloc_page_vma(gfp, &pvma, 0);
1012
1013 /* Drop reference taken by mpol_shared_policy_lookup() */
1014 mpol_cond_put(pvma.vm_policy);
1015
1016 return page;
1017}
1018#else /* !CONFIG_NUMA */
1019#ifdef CONFIG_TMPFS
1020static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1021{
1022}
1023#endif /* CONFIG_TMPFS */
1024
1025static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1026 struct shmem_inode_info *info, pgoff_t index)
1027{
1028 return swapin_readahead(swap, gfp, NULL, 0);
1029}
1030
1031static inline struct page *shmem_alloc_page(gfp_t gfp,
1032 struct shmem_inode_info *info, pgoff_t index)
1033{
1034 return alloc_page(gfp);
1035}
1036#endif /* CONFIG_NUMA */
1037
1038#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
1039static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1040{
1041 return NULL;
1042}
1043#endif
1044
1045/*
1046 * When a page is moved from swapcache to shmem filecache (either by the
1047 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1048 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1049 * ignorance of the mapping it belongs to. If that mapping has special
1050 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1051 * we may need to copy to a suitable page before moving to filecache.
1052 *
1053 * In a future release, this may well be extended to respect cpuset and
1054 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1055 * but for now it is a simple matter of zone.
1056 */
1057static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1058{
1059 return page_zonenum(page) > gfp_zone(gfp);
1060}
1061
1062static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1063 struct shmem_inode_info *info, pgoff_t index)
1064{
1065 struct page *oldpage, *newpage;
1066 struct address_space *swap_mapping;
1067 pgoff_t swap_index;
1068 int error;
1069
1070 oldpage = *pagep;
1071 swap_index = page_private(oldpage);
1072 swap_mapping = page_mapping(oldpage);
1073
1074 /*
1075 * We have arrived here because our zones are constrained, so don't
1076 * limit chance of success by further cpuset and node constraints.
1077 */
1078 gfp &= ~GFP_CONSTRAINT_MASK;
1079 newpage = shmem_alloc_page(gfp, info, index);
1080 if (!newpage)
1081 return -ENOMEM;
1082
1083 get_page(newpage);
1084 copy_highpage(newpage, oldpage);
1085 flush_dcache_page(newpage);
1086
1087 __SetPageLocked(newpage);
1088 SetPageUptodate(newpage);
1089 SetPageSwapBacked(newpage);
1090 set_page_private(newpage, swap_index);
1091 SetPageSwapCache(newpage);
1092
1093 /*
1094 * Our caller will very soon move newpage out of swapcache, but it's
1095 * a nice clean interface for us to replace oldpage by newpage there.
1096 */
1097 spin_lock_irq(&swap_mapping->tree_lock);
1098 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1099 newpage);
1100 if (!error) {
1101 __inc_zone_page_state(newpage, NR_FILE_PAGES);
1102 __dec_zone_page_state(oldpage, NR_FILE_PAGES);
1103 }
1104 spin_unlock_irq(&swap_mapping->tree_lock);
1105
1106 if (unlikely(error)) {
1107 /*
1108 * Is this possible? I think not, now that our callers check
1109 * both PageSwapCache and page_private after getting page lock;
1110 * but be defensive. Reverse old to newpage for clear and free.
1111 */
1112 oldpage = newpage;
1113 } else {
1114 mem_cgroup_migrate(oldpage, newpage);
1115 lru_cache_add_anon(newpage);
1116 *pagep = newpage;
1117 }
1118
1119 ClearPageSwapCache(oldpage);
1120 set_page_private(oldpage, 0);
1121
1122 unlock_page(oldpage);
1123 put_page(oldpage);
1124 put_page(oldpage);
1125 return error;
1126}
1127
1128/*
1129 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1130 *
1131 * If we allocate a new one we do not mark it dirty. That's up to the
1132 * vm. If we swap it in we mark it dirty since we also free the swap
1133 * entry since a page cannot live in both the swap and page cache
1134 */
1135static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1136 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
1137{
1138 struct address_space *mapping = inode->i_mapping;
1139 struct shmem_inode_info *info;
1140 struct shmem_sb_info *sbinfo;
1141 struct mem_cgroup *memcg;
1142 struct page *page;
1143 swp_entry_t swap;
1144 int error;
1145 int once = 0;
1146 int alloced = 0;
1147
1148 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1149 return -EFBIG;
1150repeat:
1151 swap.val = 0;
1152 page = find_lock_entry(mapping, index);
1153 if (radix_tree_exceptional_entry(page)) {
1154 swap = radix_to_swp_entry(page);
1155 page = NULL;
1156 }
1157
1158 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1159 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1160 error = -EINVAL;
1161 goto unlock;
1162 }
1163
1164 if (page && sgp == SGP_WRITE)
1165 mark_page_accessed(page);
1166
1167 /* fallocated page? */
1168 if (page && !PageUptodate(page)) {
1169 if (sgp != SGP_READ)
1170 goto clear;
1171 unlock_page(page);
1172 put_page(page);
1173 page = NULL;
1174 }
1175 if (page || (sgp == SGP_READ && !swap.val)) {
1176 *pagep = page;
1177 return 0;
1178 }
1179
1180 /*
1181 * Fast cache lookup did not find it:
1182 * bring it back from swap or allocate.
1183 */
1184 info = SHMEM_I(inode);
1185 sbinfo = SHMEM_SB(inode->i_sb);
1186
1187 if (swap.val) {
1188 /* Look it up and read it in.. */
1189 page = lookup_swap_cache(swap);
1190 if (!page) {
1191 /* here we actually do the io */
1192 if (fault_type)
1193 *fault_type |= VM_FAULT_MAJOR;
1194 page = shmem_swapin(swap, gfp, info, index);
1195 if (!page) {
1196 error = -ENOMEM;
1197 goto failed;
1198 }
1199 }
1200
1201 /* We have to do this with page locked to prevent races */
1202 lock_page(page);
1203 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1204 !shmem_confirm_swap(mapping, index, swap)) {
1205 error = -EEXIST; /* try again */
1206 goto unlock;
1207 }
1208 if (!PageUptodate(page)) {
1209 error = -EIO;
1210 goto failed;
1211 }
1212 wait_on_page_writeback(page);
1213
1214 if (shmem_should_replace_page(page, gfp)) {
1215 error = shmem_replace_page(&page, gfp, info, index);
1216 if (error)
1217 goto failed;
1218 }
1219
1220 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg,
1221 false);
1222 if (!error) {
1223 error = shmem_add_to_page_cache(page, mapping, index,
1224 swp_to_radix_entry(swap));
1225 /*
1226 * We already confirmed swap under page lock, and make
1227 * no memory allocation here, so usually no possibility
1228 * of error; but free_swap_and_cache() only trylocks a
1229 * page, so it is just possible that the entry has been
1230 * truncated or holepunched since swap was confirmed.
1231 * shmem_undo_range() will have done some of the
1232 * unaccounting, now delete_from_swap_cache() will do
1233 * the rest.
1234 * Reset swap.val? No, leave it so "failed" goes back to
1235 * "repeat": reading a hole and writing should succeed.
1236 */
1237 if (error) {
1238 mem_cgroup_cancel_charge(page, memcg, false);
1239 delete_from_swap_cache(page);
1240 }
1241 }
1242 if (error)
1243 goto failed;
1244
1245 mem_cgroup_commit_charge(page, memcg, true, false);
1246
1247 spin_lock(&info->lock);
1248 info->swapped--;
1249 shmem_recalc_inode(inode);
1250 spin_unlock(&info->lock);
1251
1252 if (sgp == SGP_WRITE)
1253 mark_page_accessed(page);
1254
1255 delete_from_swap_cache(page);
1256 set_page_dirty(page);
1257 swap_free(swap);
1258
1259 } else {
1260 if (shmem_acct_block(info->flags)) {
1261 error = -ENOSPC;
1262 goto failed;
1263 }
1264 if (sbinfo->max_blocks) {
1265 if (percpu_counter_compare(&sbinfo->used_blocks,
1266 sbinfo->max_blocks) >= 0) {
1267 error = -ENOSPC;
1268 goto unacct;
1269 }
1270 percpu_counter_inc(&sbinfo->used_blocks);
1271 }
1272
1273 page = shmem_alloc_page(gfp, info, index);
1274 if (!page) {
1275 error = -ENOMEM;
1276 goto decused;
1277 }
1278
1279 __SetPageSwapBacked(page);
1280 __SetPageLocked(page);
1281 if (sgp == SGP_WRITE)
1282 __SetPageReferenced(page);
1283
1284 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg,
1285 false);
1286 if (error)
1287 goto decused;
1288 error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
1289 if (!error) {
1290 error = shmem_add_to_page_cache(page, mapping, index,
1291 NULL);
1292 radix_tree_preload_end();
1293 }
1294 if (error) {
1295 mem_cgroup_cancel_charge(page, memcg, false);
1296 goto decused;
1297 }
1298 mem_cgroup_commit_charge(page, memcg, false, false);
1299 lru_cache_add_anon(page);
1300
1301 spin_lock(&info->lock);
1302 info->alloced++;
1303 inode->i_blocks += BLOCKS_PER_PAGE;
1304 shmem_recalc_inode(inode);
1305 spin_unlock(&info->lock);
1306 alloced = true;
1307
1308 /*
1309 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1310 */
1311 if (sgp == SGP_FALLOC)
1312 sgp = SGP_WRITE;
1313clear:
1314 /*
1315 * Let SGP_WRITE caller clear ends if write does not fill page;
1316 * but SGP_FALLOC on a page fallocated earlier must initialize
1317 * it now, lest undo on failure cancel our earlier guarantee.
1318 */
1319 if (sgp != SGP_WRITE) {
1320 clear_highpage(page);
1321 flush_dcache_page(page);
1322 SetPageUptodate(page);
1323 }
1324 if (sgp == SGP_DIRTY)
1325 set_page_dirty(page);
1326 }
1327
1328 /* Perhaps the file has been truncated since we checked */
1329 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1330 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1331 if (alloced) {
1332 ClearPageDirty(page);
1333 delete_from_page_cache(page);
1334 spin_lock(&info->lock);
1335 shmem_recalc_inode(inode);
1336 spin_unlock(&info->lock);
1337 }
1338 error = -EINVAL;
1339 goto unlock;
1340 }
1341 *pagep = page;
1342 return 0;
1343
1344 /*
1345 * Error recovery.
1346 */
1347decused:
1348 if (sbinfo->max_blocks)
1349 percpu_counter_add(&sbinfo->used_blocks, -1);
1350unacct:
1351 shmem_unacct_blocks(info->flags, 1);
1352failed:
1353 if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1354 error = -EEXIST;
1355unlock:
1356 if (page) {
1357 unlock_page(page);
1358 put_page(page);
1359 }
1360 if (error == -ENOSPC && !once++) {
1361 info = SHMEM_I(inode);
1362 spin_lock(&info->lock);
1363 shmem_recalc_inode(inode);
1364 spin_unlock(&info->lock);
1365 goto repeat;
1366 }
1367 if (error == -EEXIST) /* from above or from radix_tree_insert */
1368 goto repeat;
1369 return error;
1370}
1371
1372static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1373{
1374 struct inode *inode = file_inode(vma->vm_file);
1375 int error;
1376 int ret = VM_FAULT_LOCKED;
1377
1378 /*
1379 * Trinity finds that probing a hole which tmpfs is punching can
1380 * prevent the hole-punch from ever completing: which in turn
1381 * locks writers out with its hold on i_mutex. So refrain from
1382 * faulting pages into the hole while it's being punched. Although
1383 * shmem_undo_range() does remove the additions, it may be unable to
1384 * keep up, as each new page needs its own unmap_mapping_range() call,
1385 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1386 *
1387 * It does not matter if we sometimes reach this check just before the
1388 * hole-punch begins, so that one fault then races with the punch:
1389 * we just need to make racing faults a rare case.
1390 *
1391 * The implementation below would be much simpler if we just used a
1392 * standard mutex or completion: but we cannot take i_mutex in fault,
1393 * and bloating every shmem inode for this unlikely case would be sad.
1394 */
1395 if (unlikely(inode->i_private)) {
1396 struct shmem_falloc *shmem_falloc;
1397
1398 spin_lock(&inode->i_lock);
1399 shmem_falloc = inode->i_private;
1400 if (shmem_falloc &&
1401 shmem_falloc->waitq &&
1402 vmf->pgoff >= shmem_falloc->start &&
1403 vmf->pgoff < shmem_falloc->next) {
1404 wait_queue_head_t *shmem_falloc_waitq;
1405 DEFINE_WAIT(shmem_fault_wait);
1406
1407 ret = VM_FAULT_NOPAGE;
1408 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1409 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1410 /* It's polite to up mmap_sem if we can */
1411 up_read(&vma->vm_mm->mmap_sem);
1412 ret = VM_FAULT_RETRY;
1413 }
1414
1415 shmem_falloc_waitq = shmem_falloc->waitq;
1416 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1417 TASK_UNINTERRUPTIBLE);
1418 spin_unlock(&inode->i_lock);
1419 schedule();
1420
1421 /*
1422 * shmem_falloc_waitq points into the shmem_fallocate()
1423 * stack of the hole-punching task: shmem_falloc_waitq
1424 * is usually invalid by the time we reach here, but
1425 * finish_wait() does not dereference it in that case;
1426 * though i_lock needed lest racing with wake_up_all().
1427 */
1428 spin_lock(&inode->i_lock);
1429 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1430 spin_unlock(&inode->i_lock);
1431 return ret;
1432 }
1433 spin_unlock(&inode->i_lock);
1434 }
1435
1436 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1437 if (error)
1438 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1439
1440 if (ret & VM_FAULT_MAJOR) {
1441 count_vm_event(PGMAJFAULT);
1442 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1443 }
1444 return ret;
1445}
1446
1447#ifdef CONFIG_NUMA
1448static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1449{
1450 struct inode *inode = file_inode(vma->vm_file);
1451 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1452}
1453
1454static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1455 unsigned long addr)
1456{
1457 struct inode *inode = file_inode(vma->vm_file);
1458 pgoff_t index;
1459
1460 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1461 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1462}
1463#endif
1464
1465int shmem_lock(struct file *file, int lock, struct user_struct *user)
1466{
1467 struct inode *inode = file_inode(file);
1468 struct shmem_inode_info *info = SHMEM_I(inode);
1469 int retval = -ENOMEM;
1470
1471 spin_lock(&info->lock);
1472 if (lock && !(info->flags & VM_LOCKED)) {
1473 if (!user_shm_lock(inode->i_size, user))
1474 goto out_nomem;
1475 info->flags |= VM_LOCKED;
1476 mapping_set_unevictable(file->f_mapping);
1477 }
1478 if (!lock && (info->flags & VM_LOCKED) && user) {
1479 user_shm_unlock(inode->i_size, user);
1480 info->flags &= ~VM_LOCKED;
1481 mapping_clear_unevictable(file->f_mapping);
1482 }
1483 retval = 0;
1484
1485out_nomem:
1486 spin_unlock(&info->lock);
1487 return retval;
1488}
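/*
 * Note: shmem_lock() is reached from the SysV IPC side, for shmctl(SHM_LOCK),
 * rather than from mlock(). A minimal userspace sketch, for illustration only:
 *
 *	int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// mark the backing mapping unevictable
 *	shmctl(id, SHM_UNLOCK, NULL);	// clear it again
 */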
1489
1490static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1491{
1492 file_accessed(file);
1493 vma->vm_ops = &shmem_vm_ops;
1494 return 0;
1495}
1496
1497static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1498 umode_t mode, dev_t dev, unsigned long flags)
1499{
1500 struct inode *inode;
1501 struct shmem_inode_info *info;
1502 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1503
1504 if (shmem_reserve_inode(sb))
1505 return NULL;
1506
1507 inode = new_inode(sb);
1508 if (inode) {
1509 inode->i_ino = get_next_ino();
1510 inode_init_owner(inode, dir, mode);
1511 inode->i_blocks = 0;
1512 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1513 inode->i_generation = get_seconds();
1514 info = SHMEM_I(inode);
1515 memset(info, 0, (char *)inode - (char *)info);
1516 spin_lock_init(&info->lock);
1517 info->seals = F_SEAL_SEAL;
1518 info->flags = flags & VM_NORESERVE;
1519 INIT_LIST_HEAD(&info->swaplist);
1520 simple_xattrs_init(&info->xattrs);
1521 cache_no_acl(inode);
1522
1523 switch (mode & S_IFMT) {
1524 default:
1525 inode->i_op = &shmem_special_inode_operations;
1526 init_special_inode(inode, mode, dev);
1527 break;
1528 case S_IFREG:
1529 inode->i_mapping->a_ops = &shmem_aops;
1530 inode->i_op = &shmem_inode_operations;
1531 inode->i_fop = &shmem_file_operations;
1532 mpol_shared_policy_init(&info->policy,
1533 shmem_get_sbmpol(sbinfo));
1534 break;
1535 case S_IFDIR:
1536 inc_nlink(inode);
1537 /* Some things misbehave if size == 0 on a directory */
1538 inode->i_size = 2 * BOGO_DIRENT_SIZE;
1539 inode->i_op = &shmem_dir_inode_operations;
1540 inode->i_fop = &simple_dir_operations;
1541 break;
1542 case S_IFLNK:
1543 /*
1544 * Must not load anything in the rbtree,
1545 * mpol_free_shared_policy will not be called.
1546 */
1547 mpol_shared_policy_init(&info->policy, NULL);
1548 break;
1549 }
1550 } else
1551 shmem_free_inode(sb);
1552 return inode;
1553}
1554
1555bool shmem_mapping(struct address_space *mapping)
1556{
1557 if (!mapping->host)
1558 return false;
1559
1560 return mapping->host->i_sb->s_op == &shmem_ops;
1561}
1562
1563#ifdef CONFIG_TMPFS
1564static const struct inode_operations shmem_symlink_inode_operations;
1565static const struct inode_operations shmem_short_symlink_operations;
1566
1567#ifdef CONFIG_TMPFS_XATTR
1568static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
1569#else
1570#define shmem_initxattrs NULL
1571#endif
1572
1573static int
1574shmem_write_begin(struct file *file, struct address_space *mapping,
1575 loff_t pos, unsigned len, unsigned flags,
1576 struct page **pagep, void **fsdata)
1577{
1578 struct inode *inode = mapping->host;
1579 struct shmem_inode_info *info = SHMEM_I(inode);
1580 pgoff_t index = pos >> PAGE_SHIFT;
1581
1582 /* i_mutex is held by caller */
1583 if (unlikely(info->seals)) {
1584 if (info->seals & F_SEAL_WRITE)
1585 return -EPERM;
1586 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
1587 return -EPERM;
1588 }
1589
1590 return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1591}
1592
1593static int
1594shmem_write_end(struct file *file, struct address_space *mapping,
1595 loff_t pos, unsigned len, unsigned copied,
1596 struct page *page, void *fsdata)
1597{
1598 struct inode *inode = mapping->host;
1599
1600 if (pos + copied > inode->i_size)
1601 i_size_write(inode, pos + copied);
1602
1603 if (!PageUptodate(page)) {
1604 if (copied < PAGE_SIZE) {
1605 unsigned from = pos & (PAGE_SIZE - 1);
1606 zero_user_segments(page, 0, from,
1607 from + copied, PAGE_SIZE);
1608 }
1609 SetPageUptodate(page);
1610 }
1611 set_page_dirty(page);
1612 unlock_page(page);
1613 put_page(page);
1614
1615 return copied;
1616}
1617
1618static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1619{
1620 struct file *file = iocb->ki_filp;
1621 struct inode *inode = file_inode(file);
1622 struct address_space *mapping = inode->i_mapping;
1623 pgoff_t index;
1624 unsigned long offset;
1625 enum sgp_type sgp = SGP_READ;
1626 int error = 0;
1627 ssize_t retval = 0;
1628 loff_t *ppos = &iocb->ki_pos;
1629
1630 /*
1631 * Might this read be for a stacking filesystem? Then when reading
1632 * holes of a sparse file, we actually need to allocate those pages,
1633	 * and even mark them dirty, so those allocations are charged against the max_blocks limit.
1634 */
1635 if (!iter_is_iovec(to))
1636 sgp = SGP_DIRTY;
1637
1638 index = *ppos >> PAGE_SHIFT;
1639 offset = *ppos & ~PAGE_MASK;
1640
1641 for (;;) {
1642 struct page *page = NULL;
1643 pgoff_t end_index;
1644 unsigned long nr, ret;
1645 loff_t i_size = i_size_read(inode);
1646
1647 end_index = i_size >> PAGE_SHIFT;
1648 if (index > end_index)
1649 break;
1650 if (index == end_index) {
1651 nr = i_size & ~PAGE_MASK;
1652 if (nr <= offset)
1653 break;
1654 }
1655
1656 error = shmem_getpage(inode, index, &page, sgp, NULL);
1657 if (error) {
1658 if (error == -EINVAL)
1659 error = 0;
1660 break;
1661 }
1662 if (page)
1663 unlock_page(page);
1664
1665 /*
1666 * We must evaluate after, since reads (unlike writes)
1667 * are called without i_mutex protection against truncate
1668 */
1669 nr = PAGE_SIZE;
1670 i_size = i_size_read(inode);
1671 end_index = i_size >> PAGE_SHIFT;
1672 if (index == end_index) {
1673 nr = i_size & ~PAGE_MASK;
1674 if (nr <= offset) {
1675 if (page)
1676 put_page(page);
1677 break;
1678 }
1679 }
1680 nr -= offset;
1681
1682 if (page) {
1683 /*
1684 * If users can be writing to this page using arbitrary
1685 * virtual addresses, take care about potential aliasing
1686 * before reading the page on the kernel side.
1687 */
1688 if (mapping_writably_mapped(mapping))
1689 flush_dcache_page(page);
1690 /*
1691 * Mark the page accessed if we read the beginning.
1692 */
1693 if (!offset)
1694 mark_page_accessed(page);
1695 } else {
1696 page = ZERO_PAGE(0);
1697 get_page(page);
1698 }
1699
1700 /*
1701 * Ok, we have the page, and it's up-to-date, so
1702 * now we can copy it to user space...
1703 */
1704 ret = copy_page_to_iter(page, offset, nr, to);
1705 retval += ret;
1706 offset += ret;
1707 index += offset >> PAGE_SHIFT;
1708 offset &= ~PAGE_MASK;
1709
1710 put_page(page);
1711 if (!iov_iter_count(to))
1712 break;
1713 if (ret < nr) {
1714 error = -EFAULT;
1715 break;
1716 }
1717 cond_resched();
1718 }
1719
1720 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
1721 file_accessed(file);
1722 return retval ? retval : error;
1723}
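/*
 * Note: when the loop above finds a hole, shmem_getpage() returns no page for
 * SGP_READ and the data is copied from ZERO_PAGE(0) instead, so a plain
 * read(2) of a sparse tmpfs file does not allocate pages for its holes.
 */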
1724
1725static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1726 struct pipe_inode_info *pipe, size_t len,
1727 unsigned int flags)
1728{
1729 struct address_space *mapping = in->f_mapping;
1730 struct inode *inode = mapping->host;
1731 unsigned int loff, nr_pages, req_pages;
1732 struct page *pages[PIPE_DEF_BUFFERS];
1733 struct partial_page partial[PIPE_DEF_BUFFERS];
1734 struct page *page;
1735 pgoff_t index, end_index;
1736 loff_t isize, left;
1737 int error, page_nr;
1738 struct splice_pipe_desc spd = {
1739 .pages = pages,
1740 .partial = partial,
1741 .nr_pages_max = PIPE_DEF_BUFFERS,
1742 .flags = flags,
1743 .ops = &page_cache_pipe_buf_ops,
1744 .spd_release = spd_release_page,
1745 };
1746
1747 isize = i_size_read(inode);
1748 if (unlikely(*ppos >= isize))
1749 return 0;
1750
1751 left = isize - *ppos;
1752 if (unlikely(left < len))
1753 len = left;
1754
1755 if (splice_grow_spd(pipe, &spd))
1756 return -ENOMEM;
1757
1758 index = *ppos >> PAGE_SHIFT;
1759 loff = *ppos & ~PAGE_MASK;
1760 req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
1761 nr_pages = min(req_pages, spd.nr_pages_max);
1762
1763 spd.nr_pages = find_get_pages_contig(mapping, index,
1764 nr_pages, spd.pages);
1765 index += spd.nr_pages;
1766 error = 0;
1767
1768 while (spd.nr_pages < nr_pages) {
1769 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1770 if (error)
1771 break;
1772 unlock_page(page);
1773 spd.pages[spd.nr_pages++] = page;
1774 index++;
1775 }
1776
1777 index = *ppos >> PAGE_SHIFT;
1778 nr_pages = spd.nr_pages;
1779 spd.nr_pages = 0;
1780
1781 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1782 unsigned int this_len;
1783
1784 if (!len)
1785 break;
1786
1787 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
1788 page = spd.pages[page_nr];
1789
1790 if (!PageUptodate(page) || page->mapping != mapping) {
1791 error = shmem_getpage(inode, index, &page,
1792 SGP_CACHE, NULL);
1793 if (error)
1794 break;
1795 unlock_page(page);
1796 put_page(spd.pages[page_nr]);
1797 spd.pages[page_nr] = page;
1798 }
1799
1800 isize = i_size_read(inode);
1801 end_index = (isize - 1) >> PAGE_SHIFT;
1802 if (unlikely(!isize || index > end_index))
1803 break;
1804
1805 if (end_index == index) {
1806 unsigned int plen;
1807
1808 plen = ((isize - 1) & ~PAGE_MASK) + 1;
1809 if (plen <= loff)
1810 break;
1811
1812 this_len = min(this_len, plen - loff);
1813 len = this_len;
1814 }
1815
1816 spd.partial[page_nr].offset = loff;
1817 spd.partial[page_nr].len = this_len;
1818 len -= this_len;
1819 loff = 0;
1820 spd.nr_pages++;
1821 index++;
1822 }
1823
1824 while (page_nr < nr_pages)
1825 put_page(spd.pages[page_nr++]);
1826
1827 if (spd.nr_pages)
1828 error = splice_to_pipe(pipe, &spd);
1829
1830 splice_shrink_spd(&spd);
1831
1832 if (error > 0) {
1833 *ppos += error;
1834 file_accessed(in);
1835 }
1836 return error;
1837}
1838
1839/*
1840 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
1841 */
1842static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
1843 pgoff_t index, pgoff_t end, int whence)
1844{
1845 struct page *page;
1846 struct pagevec pvec;
1847 pgoff_t indices[PAGEVEC_SIZE];
1848 bool done = false;
1849 int i;
1850
1851 pagevec_init(&pvec, 0);
1852 pvec.nr = 1; /* start small: we may be there already */
1853 while (!done) {
1854 pvec.nr = find_get_entries(mapping, index,
1855 pvec.nr, pvec.pages, indices);
1856 if (!pvec.nr) {
1857 if (whence == SEEK_DATA)
1858 index = end;
1859 break;
1860 }
1861 for (i = 0; i < pvec.nr; i++, index++) {
1862 if (index < indices[i]) {
1863 if (whence == SEEK_HOLE) {
1864 done = true;
1865 break;
1866 }
1867 index = indices[i];
1868 }
1869 page = pvec.pages[i];
1870 if (page && !radix_tree_exceptional_entry(page)) {
1871 if (!PageUptodate(page))
1872 page = NULL;
1873 }
1874 if (index >= end ||
1875 (page && whence == SEEK_DATA) ||
1876 (!page && whence == SEEK_HOLE)) {
1877 done = true;
1878 break;
1879 }
1880 }
1881 pagevec_remove_exceptionals(&pvec);
1882 pagevec_release(&pvec);
1883 pvec.nr = PAGEVEC_SIZE;
1884 cond_resched();
1885 }
1886 return index;
1887}
1888
1889static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1890{
1891 struct address_space *mapping = file->f_mapping;
1892 struct inode *inode = mapping->host;
1893 pgoff_t start, end;
1894 loff_t new_offset;
1895
1896 if (whence != SEEK_DATA && whence != SEEK_HOLE)
1897 return generic_file_llseek_size(file, offset, whence,
1898 MAX_LFS_FILESIZE, i_size_read(inode));
1899 inode_lock(inode);
1900 /* We're holding i_mutex so we can access i_size directly */
1901
1902 if (offset < 0)
1903 offset = -EINVAL;
1904 else if (offset >= inode->i_size)
1905 offset = -ENXIO;
1906 else {
1907 start = offset >> PAGE_SHIFT;
1908 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1909 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1910 new_offset <<= PAGE_SHIFT;
1911 if (new_offset > offset) {
1912 if (new_offset < inode->i_size)
1913 offset = new_offset;
1914 else if (whence == SEEK_DATA)
1915 offset = -ENXIO;
1916 else
1917 offset = inode->i_size;
1918 }
1919 }
1920
1921 if (offset >= 0)
1922 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
1923 inode_unlock(inode);
1924 return offset;
1925}
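/*
 * Userspace sketch (illustrative only): the SEEK_DATA/SEEK_HOLE support above
 * lets tools walk the allocated extents of a sparse tmpfs file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first allocated offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that data run
 *	// SEEK_DATA failing with ENXIO means no data at or after the offset
 */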
1926
1927/*
1928 * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
1929 * so reuse a tag which we firmly believe is never set or cleared on shmem.
1930 */
1931#define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE
1932#define LAST_SCAN 4 /* about 150ms max */
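/*
 * The "about 150ms" above follows from the sleeps in shmem_wait_for_pins()
 * below: scan 0 only drains the LRUs, then scans 1..4 sleep for
 * (HZ << scan) / 200 jiffies, i.e. roughly 10ms + 20ms + 40ms + 80ms = 150ms
 * in total.
 */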
1933
1934static void shmem_tag_pins(struct address_space *mapping)
1935{
1936 struct radix_tree_iter iter;
1937 void **slot;
1938 pgoff_t start;
1939 struct page *page;
1940
1941 lru_add_drain();
1942 start = 0;
1943 rcu_read_lock();
1944
1945 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1946 page = radix_tree_deref_slot(slot);
1947 if (!page || radix_tree_exception(page)) {
1948 if (radix_tree_deref_retry(page)) {
1949 slot = radix_tree_iter_retry(&iter);
1950 continue;
1951 }
1952 } else if (page_count(page) - page_mapcount(page) > 1) {
1953 spin_lock_irq(&mapping->tree_lock);
1954 radix_tree_tag_set(&mapping->page_tree, iter.index,
1955 SHMEM_TAG_PINNED);
1956 spin_unlock_irq(&mapping->tree_lock);
1957 }
1958
1959 if (need_resched()) {
1960 cond_resched_rcu();
1961 slot = radix_tree_iter_next(&iter);
1962 }
1963 }
1964 rcu_read_unlock();
1965}
1966
1967/*
1968 * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
1969 * via get_user_pages(), drivers might have some pending I/O without any active
1970 * user-space mappings (e.g. direct-IO, AIO). Therefore, we look at all pages
1971 * and see whether any of them has an elevated ref-count. If so, we tag them and wait for
1972 * them to be dropped.
1973 * The caller must guarantee that no new user will acquire writable references
1974 * to those pages to avoid races.
1975 */
1976static int shmem_wait_for_pins(struct address_space *mapping)
1977{
1978 struct radix_tree_iter iter;
1979 void **slot;
1980 pgoff_t start;
1981 struct page *page;
1982 int error, scan;
1983
1984 shmem_tag_pins(mapping);
1985
1986 error = 0;
1987 for (scan = 0; scan <= LAST_SCAN; scan++) {
1988 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
1989 break;
1990
1991 if (!scan)
1992 lru_add_drain_all();
1993 else if (schedule_timeout_killable((HZ << scan) / 200))
1994 scan = LAST_SCAN;
1995
1996 start = 0;
1997 rcu_read_lock();
1998 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
1999 start, SHMEM_TAG_PINNED) {
2000
2001 page = radix_tree_deref_slot(slot);
2002 if (radix_tree_exception(page)) {
2003 if (radix_tree_deref_retry(page)) {
2004 slot = radix_tree_iter_retry(&iter);
2005 continue;
2006 }
2007
2008 page = NULL;
2009 }
2010
2011 if (page &&
2012 page_count(page) - page_mapcount(page) != 1) {
2013 if (scan < LAST_SCAN)
2014 goto continue_resched;
2015
2016 /*
2017 * On the last scan, we clean up all those tags
2018 * we inserted; but make a note that we still
2019 * found pages pinned.
2020 */
2021 error = -EBUSY;
2022 }
2023
2024 spin_lock_irq(&mapping->tree_lock);
2025 radix_tree_tag_clear(&mapping->page_tree,
2026 iter.index, SHMEM_TAG_PINNED);
2027 spin_unlock_irq(&mapping->tree_lock);
2028continue_resched:
2029 if (need_resched()) {
2030 cond_resched_rcu();
2031 slot = radix_tree_iter_next(&iter);
2032 }
2033 }
2034 rcu_read_unlock();
2035 }
2036
2037 return error;
2038}
2039
2040#define F_ALL_SEALS (F_SEAL_SEAL | \
2041 F_SEAL_SHRINK | \
2042 F_SEAL_GROW | \
2043 F_SEAL_WRITE)
2044
2045int shmem_add_seals(struct file *file, unsigned int seals)
2046{
2047 struct inode *inode = file_inode(file);
2048 struct shmem_inode_info *info = SHMEM_I(inode);
2049 int error;
2050
2051 /*
2052 * SEALING
2053 * Sealing allows multiple parties to share a shmem-file but restrict
2054 * access to a specific subset of file operations. Seals can only be
2055 * added, but never removed. This way, mutually untrusted parties can
2056 * share common memory regions with a well-defined policy. A malicious
2057 * peer can thus never perform unwanted operations on a shared object.
2058 *
2059 * Seals are only supported on special shmem-files and always affect
2060 * the whole underlying inode. Once a seal is set, it may prevent some
2061 * kinds of access to the file. Currently, the following seals are
2062 * defined:
2063 * SEAL_SEAL: Prevent further seals from being set on this file
2064 * SEAL_SHRINK: Prevent the file from shrinking
2065 * SEAL_GROW: Prevent the file from growing
2066 * SEAL_WRITE: Prevent write access to the file
2067 *
2068 * As we don't require any trust relationship between two parties, we
2069 * must prevent seals from being removed. Therefore, sealing a file
2070	 * only adds a given set of seals to the file; it never touches
2071 * existing seals. Furthermore, the "setting seals"-operation can be
2072 * sealed itself, which basically prevents any further seal from being
2073 * added.
2074 *
2075 * Semantics of sealing are only defined on volatile files. Only
2076 * anonymous shmem files support sealing. More importantly, seals are
2077 * never written to disk. Therefore, there's no plan to support it on
2078 * other file types.
2079 */
2080
2081 if (file->f_op != &shmem_file_operations)
2082 return -EINVAL;
2083 if (!(file->f_mode & FMODE_WRITE))
2084 return -EPERM;
2085 if (seals & ~(unsigned int)F_ALL_SEALS)
2086 return -EINVAL;
2087
2088 inode_lock(inode);
2089
2090 if (info->seals & F_SEAL_SEAL) {
2091 error = -EPERM;
2092 goto unlock;
2093 }
2094
2095 if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
2096 error = mapping_deny_writable(file->f_mapping);
2097 if (error)
2098 goto unlock;
2099
2100 error = shmem_wait_for_pins(file->f_mapping);
2101 if (error) {
2102 mapping_allow_writable(file->f_mapping);
2103 goto unlock;
2104 }
2105 }
2106
2107 info->seals |= seals;
2108 error = 0;
2109
2110unlock:
2111 inode_unlock(inode);
2112 return error;
2113}
2114EXPORT_SYMBOL_GPL(shmem_add_seals);
2115
2116int shmem_get_seals(struct file *file)
2117{
2118 if (file->f_op != &shmem_file_operations)
2119 return -EINVAL;
2120
2121 return SHMEM_I(file_inode(file))->seals;
2122}
2123EXPORT_SYMBOL_GPL(shmem_get_seals);
2124
2125long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2126{
2127 long error;
2128
2129 switch (cmd) {
2130 case F_ADD_SEALS:
2131 /* disallow upper 32bit */
2132 if (arg > UINT_MAX)
2133 return -EINVAL;
2134
2135 error = shmem_add_seals(file, arg);
2136 break;
2137 case F_GET_SEALS:
2138 error = shmem_get_seals(file);
2139 break;
2140 default:
2141 error = -EINVAL;
2142 break;
2143 }
2144
2145 return error;
2146}
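/*
 * Illustrative userspace sketch of the sealing API handled above (assumes a
 * libc that exposes memfd_create(); the F_ADD_SEALS/F_GET_SEALS commands and
 * F_SEAL_* constants come from <linux/fcntl.h>):
 *
 *	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	ftruncate(fd, size);
 *	... fill the contents via write(2) ...
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE);
 *	// the fd can now be handed to an untrusted peer: the seals make the
 *	// contents immutable, and F_GET_SEALS lets the peer verify that.
 */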
2147
2148static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2149 loff_t len)
2150{
2151 struct inode *inode = file_inode(file);
2152 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2153 struct shmem_inode_info *info = SHMEM_I(inode);
2154 struct shmem_falloc shmem_falloc;
2155 pgoff_t start, index, end;
2156 int error;
2157
2158 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2159 return -EOPNOTSUPP;
2160
2161 inode_lock(inode);
2162
2163 if (mode & FALLOC_FL_PUNCH_HOLE) {
2164 struct address_space *mapping = file->f_mapping;
2165 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2166 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2167 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2168
2169 /* protected by i_mutex */
2170 if (info->seals & F_SEAL_WRITE) {
2171 error = -EPERM;
2172 goto out;
2173 }
2174
2175 shmem_falloc.waitq = &shmem_falloc_waitq;
2176 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2177 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2178 spin_lock(&inode->i_lock);
2179 inode->i_private = &shmem_falloc;
2180 spin_unlock(&inode->i_lock);
2181
2182 if ((u64)unmap_end > (u64)unmap_start)
2183 unmap_mapping_range(mapping, unmap_start,
2184 1 + unmap_end - unmap_start, 0);
2185 shmem_truncate_range(inode, offset, offset + len - 1);
2186 /* No need to unmap again: hole-punching leaves COWed pages */
2187
2188 spin_lock(&inode->i_lock);
2189 inode->i_private = NULL;
2190 wake_up_all(&shmem_falloc_waitq);
2191 spin_unlock(&inode->i_lock);
2192 error = 0;
2193 goto out;
2194 }
2195
2196 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2197 error = inode_newsize_ok(inode, offset + len);
2198 if (error)
2199 goto out;
2200
2201 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2202 error = -EPERM;
2203 goto out;
2204 }
2205
2206 start = offset >> PAGE_SHIFT;
2207 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2208 /* Try to avoid a swapstorm if len is impossible to satisfy */
2209 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2210 error = -ENOSPC;
2211 goto out;
2212 }
2213
2214 shmem_falloc.waitq = NULL;
2215 shmem_falloc.start = start;
2216 shmem_falloc.next = start;
2217 shmem_falloc.nr_falloced = 0;
2218 shmem_falloc.nr_unswapped = 0;
2219 spin_lock(&inode->i_lock);
2220 inode->i_private = &shmem_falloc;
2221 spin_unlock(&inode->i_lock);
2222
2223 for (index = start; index < end; index++) {
2224 struct page *page;
2225
2226 /*
2227 * Good, the fallocate(2) manpage permits EINTR: we may have
2228 * been interrupted because we are using up too much memory.
2229 */
2230 if (signal_pending(current))
2231 error = -EINTR;
2232 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2233 error = -ENOMEM;
2234 else
2235 error = shmem_getpage(inode, index, &page, SGP_FALLOC,
2236 NULL);
2237 if (error) {
2238 /* Remove the !PageUptodate pages we added */
2239 shmem_undo_range(inode,
2240 (loff_t)start << PAGE_SHIFT,
2241 (loff_t)index << PAGE_SHIFT, true);
2242 goto undone;
2243 }
2244
2245 /*
2246 * Inform shmem_writepage() how far we have reached.
2247 * No need for lock or barrier: we have the page lock.
2248 */
2249 shmem_falloc.next++;
2250 if (!PageUptodate(page))
2251 shmem_falloc.nr_falloced++;
2252
2253 /*
2254 * If !PageUptodate, leave it that way so that freeable pages
2255 * can be recognized if we need to rollback on error later.
2256 * But set_page_dirty so that memory pressure will swap rather
2257 * than free the pages we are allocating (and SGP_CACHE pages
2258 * might still be clean: we now need to mark those dirty too).
2259 */
2260 set_page_dirty(page);
2261 unlock_page(page);
2262 put_page(page);
2263 cond_resched();
2264 }
2265
2266 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2267 i_size_write(inode, offset + len);
2268 inode->i_ctime = CURRENT_TIME;
2269undone:
2270 spin_lock(&inode->i_lock);
2271 inode->i_private = NULL;
2272 spin_unlock(&inode->i_lock);
2273out:
2274 inode_unlock(inode);
2275 return error;
2276}
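/*
 * Usage sketch (illustration only): tmpfs accepts the two fallocate(2) modes
 * handled above, preallocation and hole punching:
 *
 *	fallocate(fd, 0, 0, len);			// reserve pages up front
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, off, len);	// free a range again
 *
 * Preallocation may legitimately fail with EINTR under memory pressure, as
 * noted in the loop above, so callers should be prepared to retry.
 */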
2277
2278static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2279{
2280 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2281
2282 buf->f_type = TMPFS_MAGIC;
2283 buf->f_bsize = PAGE_SIZE;
2284 buf->f_namelen = NAME_MAX;
2285 if (sbinfo->max_blocks) {
2286 buf->f_blocks = sbinfo->max_blocks;
2287 buf->f_bavail =
2288 buf->f_bfree = sbinfo->max_blocks -
2289 percpu_counter_sum(&sbinfo->used_blocks);
2290 }
2291 if (sbinfo->max_inodes) {
2292 buf->f_files = sbinfo->max_inodes;
2293 buf->f_ffree = sbinfo->free_inodes;
2294 }
2295 /* else leave those fields 0 like simple_statfs */
2296 return 0;
2297}
2298
2299/*
2300 * File creation. Allocate an inode, and we're done.
2301 */
2302static int
2303shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2304{
2305 struct inode *inode;
2306 int error = -ENOSPC;
2307
2308 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2309 if (inode) {
2310 error = simple_acl_create(dir, inode);
2311 if (error)
2312 goto out_iput;
2313 error = security_inode_init_security(inode, dir,
2314 &dentry->d_name,
2315 shmem_initxattrs, NULL);
2316 if (error && error != -EOPNOTSUPP)
2317 goto out_iput;
2318
2319 error = 0;
2320 dir->i_size += BOGO_DIRENT_SIZE;
2321 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2322 d_instantiate(dentry, inode);
2323 dget(dentry); /* Extra count - pin the dentry in core */
2324 }
2325 return error;
2326out_iput:
2327 iput(inode);
2328 return error;
2329}
2330
2331static int
2332shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2333{
2334 struct inode *inode;
2335 int error = -ENOSPC;
2336
2337 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2338 if (inode) {
2339 error = security_inode_init_security(inode, dir,
2340 NULL,
2341 shmem_initxattrs, NULL);
2342 if (error && error != -EOPNOTSUPP)
2343 goto out_iput;
2344 error = simple_acl_create(dir, inode);
2345 if (error)
2346 goto out_iput;
2347 d_tmpfile(dentry, inode);
2348 }
2349 return error;
2350out_iput:
2351 iput(inode);
2352 return error;
2353}
2354
2355static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2356{
2357 int error;
2358
2359 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2360 return error;
2361 inc_nlink(dir);
2362 return 0;
2363}
2364
2365static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2366 bool excl)
2367{
2368 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2369}
2370
2371/*
2372 * Link a file.
2373 */
2374static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2375{
2376 struct inode *inode = d_inode(old_dentry);
2377 int ret;
2378
2379 /*
2380 * No ordinary (disk based) filesystem counts links as inodes;
2381 * but each new link needs a new dentry, pinning lowmem, and
2382 * tmpfs dentries cannot be pruned until they are unlinked.
2383 */
2384 ret = shmem_reserve_inode(inode->i_sb);
2385 if (ret)
2386 goto out;
2387
2388 dir->i_size += BOGO_DIRENT_SIZE;
2389 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2390 inc_nlink(inode);
2391 ihold(inode); /* New dentry reference */
2392 dget(dentry); /* Extra pinning count for the created dentry */
2393 d_instantiate(dentry, inode);
2394out:
2395 return ret;
2396}
2397
2398static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2399{
2400 struct inode *inode = d_inode(dentry);
2401
2402 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2403 shmem_free_inode(inode->i_sb);
2404
2405 dir->i_size -= BOGO_DIRENT_SIZE;
2406 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2407 drop_nlink(inode);
2408 dput(dentry); /* Undo the count from "create" - this does all the work */
2409 return 0;
2410}
2411
2412static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2413{
2414 if (!simple_empty(dentry))
2415 return -ENOTEMPTY;
2416
2417 drop_nlink(d_inode(dentry));
2418 drop_nlink(dir);
2419 return shmem_unlink(dir, dentry);
2420}
2421
2422static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2423{
2424 bool old_is_dir = d_is_dir(old_dentry);
2425 bool new_is_dir = d_is_dir(new_dentry);
2426
2427 if (old_dir != new_dir && old_is_dir != new_is_dir) {
2428 if (old_is_dir) {
2429 drop_nlink(old_dir);
2430 inc_nlink(new_dir);
2431 } else {
2432 drop_nlink(new_dir);
2433 inc_nlink(old_dir);
2434 }
2435 }
2436 old_dir->i_ctime = old_dir->i_mtime =
2437 new_dir->i_ctime = new_dir->i_mtime =
2438 d_inode(old_dentry)->i_ctime =
2439 d_inode(new_dentry)->i_ctime = CURRENT_TIME;
2440
2441 return 0;
2442}
2443
2444static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
2445{
2446 struct dentry *whiteout;
2447 int error;
2448
2449 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2450 if (!whiteout)
2451 return -ENOMEM;
2452
2453 error = shmem_mknod(old_dir, whiteout,
2454 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2455 dput(whiteout);
2456 if (error)
2457 return error;
2458
2459 /*
2460 * Cheat and hash the whiteout while the old dentry is still in
2461 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2462 *
2463 * d_lookup() will consistently find one of them at this point,
2464 * not sure which one, but that isn't even important.
2465 */
2466 d_rehash(whiteout);
2467 return 0;
2468}
2469
2470/*
2471 * The VFS layer already does all the dentry stuff for rename;
2472 * we just have to decrement the usage count for the target if
2473 * it exists, so that the VFS layer correctly frees it when it
2474 * gets overwritten.
2475 */
2476static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
2477{
2478 struct inode *inode = d_inode(old_dentry);
2479 int they_are_dirs = S_ISDIR(inode->i_mode);
2480
2481 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
2482 return -EINVAL;
2483
2484 if (flags & RENAME_EXCHANGE)
2485 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
2486
2487 if (!simple_empty(new_dentry))
2488 return -ENOTEMPTY;
2489
2490 if (flags & RENAME_WHITEOUT) {
2491 int error;
2492
2493 error = shmem_whiteout(old_dir, old_dentry);
2494 if (error)
2495 return error;
2496 }
2497
2498 if (d_really_is_positive(new_dentry)) {
2499 (void) shmem_unlink(new_dir, new_dentry);
2500 if (they_are_dirs) {
2501 drop_nlink(d_inode(new_dentry));
2502 drop_nlink(old_dir);
2503 }
2504 } else if (they_are_dirs) {
2505 drop_nlink(old_dir);
2506 inc_nlink(new_dir);
2507 }
2508
2509 old_dir->i_size -= BOGO_DIRENT_SIZE;
2510 new_dir->i_size += BOGO_DIRENT_SIZE;
2511 old_dir->i_ctime = old_dir->i_mtime =
2512 new_dir->i_ctime = new_dir->i_mtime =
2513 inode->i_ctime = CURRENT_TIME;
2514 return 0;
2515}
2516
2517static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2518{
2519 int error;
2520 int len;
2521 struct inode *inode;
2522 struct page *page;
2523 struct shmem_inode_info *info;
2524
2525 len = strlen(symname) + 1;
2526 if (len > PAGE_SIZE)
2527 return -ENAMETOOLONG;
2528
2529 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2530 if (!inode)
2531 return -ENOSPC;
2532
2533 error = security_inode_init_security(inode, dir, &dentry->d_name,
2534 shmem_initxattrs, NULL);
2535 if (error) {
2536 if (error != -EOPNOTSUPP) {
2537 iput(inode);
2538 return error;
2539 }
2540 error = 0;
2541 }
2542
2543 info = SHMEM_I(inode);
2544 inode->i_size = len-1;
2545 if (len <= SHORT_SYMLINK_LEN) {
2546 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
2547 if (!inode->i_link) {
2548 iput(inode);
2549 return -ENOMEM;
2550 }
2551 inode->i_op = &shmem_short_symlink_operations;
2552 } else {
2553 inode_nohighmem(inode);
2554 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2555 if (error) {
2556 iput(inode);
2557 return error;
2558 }
2559 inode->i_mapping->a_ops = &shmem_aops;
2560 inode->i_op = &shmem_symlink_inode_operations;
2561 memcpy(page_address(page), symname, len);
2562 SetPageUptodate(page);
2563 set_page_dirty(page);
2564 unlock_page(page);
2565 put_page(page);
2566 }
2567 dir->i_size += BOGO_DIRENT_SIZE;
2568 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2569 d_instantiate(dentry, inode);
2570 dget(dentry);
2571 return 0;
2572}
2573
2574static void shmem_put_link(void *arg)
2575{
2576 mark_page_accessed(arg);
2577 put_page(arg);
2578}
2579
2580static const char *shmem_get_link(struct dentry *dentry,
2581 struct inode *inode,
2582 struct delayed_call *done)
2583{
2584 struct page *page = NULL;
2585 int error;
2586 if (!dentry) {
2587 page = find_get_page(inode->i_mapping, 0);
2588 if (!page)
2589 return ERR_PTR(-ECHILD);
2590 if (!PageUptodate(page)) {
2591 put_page(page);
2592 return ERR_PTR(-ECHILD);
2593 }
2594 } else {
2595 error = shmem_getpage(inode, 0, &page, SGP_READ, NULL);
2596 if (error)
2597 return ERR_PTR(error);
2598 unlock_page(page);
2599 }
2600 set_delayed_call(done, shmem_put_link, page);
2601 return page_address(page);
2602}
2603
2604#ifdef CONFIG_TMPFS_XATTR
2605/*
2606 * Superblocks without xattr inode operations may get some security.* xattr
2607 * support from the LSM "for free". As soon as we have any other xattrs
2608 * like ACLs, we also need to implement the security.* handlers at
2609 * filesystem level, though.
2610 */
2611
2612/*
2613 * Callback for security_inode_init_security() for acquiring xattrs.
2614 */
2615static int shmem_initxattrs(struct inode *inode,
2616 const struct xattr *xattr_array,
2617 void *fs_info)
2618{
2619 struct shmem_inode_info *info = SHMEM_I(inode);
2620 const struct xattr *xattr;
2621 struct simple_xattr *new_xattr;
2622 size_t len;
2623
2624 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
2625 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
2626 if (!new_xattr)
2627 return -ENOMEM;
2628
2629 len = strlen(xattr->name) + 1;
2630 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
2631 GFP_KERNEL);
2632 if (!new_xattr->name) {
2633 kfree(new_xattr);
2634 return -ENOMEM;
2635 }
2636
2637 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
2638 XATTR_SECURITY_PREFIX_LEN);
2639 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
2640 xattr->name, len);
2641
2642 simple_xattr_list_add(&info->xattrs, new_xattr);
2643 }
2644
2645 return 0;
2646}
2647
2648static int shmem_xattr_handler_get(const struct xattr_handler *handler,
2649 struct dentry *dentry, const char *name,
2650 void *buffer, size_t size)
2651{
2652 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
2653
2654 name = xattr_full_name(handler, name);
2655 return simple_xattr_get(&info->xattrs, name, buffer, size);
2656}
2657
2658static int shmem_xattr_handler_set(const struct xattr_handler *handler,
2659 struct dentry *dentry, const char *name,
2660 const void *value, size_t size, int flags)
2661{
2662 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
2663
2664 name = xattr_full_name(handler, name);
2665 return simple_xattr_set(&info->xattrs, name, value, size, flags);
2666}
2667
2668static const struct xattr_handler shmem_security_xattr_handler = {
2669 .prefix = XATTR_SECURITY_PREFIX,
2670 .get = shmem_xattr_handler_get,
2671 .set = shmem_xattr_handler_set,
2672};
2673
2674static const struct xattr_handler shmem_trusted_xattr_handler = {
2675 .prefix = XATTR_TRUSTED_PREFIX,
2676 .get = shmem_xattr_handler_get,
2677 .set = shmem_xattr_handler_set,
2678};
2679
2680static const struct xattr_handler *shmem_xattr_handlers[] = {
2681#ifdef CONFIG_TMPFS_POSIX_ACL
2682 &posix_acl_access_xattr_handler,
2683 &posix_acl_default_xattr_handler,
2684#endif
2685 &shmem_security_xattr_handler,
2686 &shmem_trusted_xattr_handler,
2687 NULL
2688};
2689
2690static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2691{
2692 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
2693 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
2694}
2695#endif /* CONFIG_TMPFS_XATTR */
2696
2697static const struct inode_operations shmem_short_symlink_operations = {
2698 .readlink = generic_readlink,
2699 .get_link = simple_get_link,
2700#ifdef CONFIG_TMPFS_XATTR
2701 .setxattr = generic_setxattr,
2702 .getxattr = generic_getxattr,
2703 .listxattr = shmem_listxattr,
2704 .removexattr = generic_removexattr,
2705#endif
2706};
2707
2708static const struct inode_operations shmem_symlink_inode_operations = {
2709 .readlink = generic_readlink,
2710 .get_link = shmem_get_link,
2711#ifdef CONFIG_TMPFS_XATTR
2712 .setxattr = generic_setxattr,
2713 .getxattr = generic_getxattr,
2714 .listxattr = shmem_listxattr,
2715 .removexattr = generic_removexattr,
2716#endif
2717};
2718
2719static struct dentry *shmem_get_parent(struct dentry *child)
2720{
2721 return ERR_PTR(-ESTALE);
2722}
2723
2724static int shmem_match(struct inode *ino, void *vfh)
2725{
2726 __u32 *fh = vfh;
2727 __u64 inum = fh[2];
2728 inum = (inum << 32) | fh[1];
2729 return ino->i_ino == inum && fh[0] == ino->i_generation;
2730}
2731
2732static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2733 struct fid *fid, int fh_len, int fh_type)
2734{
2735 struct inode *inode;
2736 struct dentry *dentry = NULL;
2737 u64 inum;
2738
2739 if (fh_len < 3)
2740 return NULL;
2741
2742 inum = fid->raw[2];
2743 inum = (inum << 32) | fid->raw[1];
2744
2745 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2746 shmem_match, fid->raw);
2747 if (inode) {
2748 dentry = d_find_alias(inode);
2749 iput(inode);
2750 }
2751
2752 return dentry;
2753}
2754
2755static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2756 struct inode *parent)
2757{
2758 if (*len < 3) {
2759 *len = 3;
2760 return FILEID_INVALID;
2761 }
2762
2763 if (inode_unhashed(inode)) {
2764 /* Unfortunately insert_inode_hash is not idempotent,
2765 * so as we hash inodes here rather than at creation
2766 * time, we need a lock to ensure we only try
2767 * to do it once
2768 */
2769 static DEFINE_SPINLOCK(lock);
2770 spin_lock(&lock);
2771 if (inode_unhashed(inode))
2772 __insert_inode_hash(inode,
2773 inode->i_ino + inode->i_generation);
2774 spin_unlock(&lock);
2775 }
2776
2777 fh[0] = inode->i_generation;
2778 fh[1] = inode->i_ino;
2779 fh[2] = ((__u64)inode->i_ino) >> 32;
2780
2781 *len = 3;
2782 return 1;
2783}
2784
2785static const struct export_operations shmem_export_ops = {
2786 .get_parent = shmem_get_parent,
2787 .encode_fh = shmem_encode_fh,
2788 .fh_to_dentry = shmem_fh_to_dentry,
2789};
2790
2791static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2792 bool remount)
2793{
2794 char *this_char, *value, *rest;
2795 struct mempolicy *mpol = NULL;
2796 uid_t uid;
2797 gid_t gid;
2798
2799 while (options != NULL) {
2800 this_char = options;
2801 for (;;) {
2802 /*
2803 * NUL-terminate this option: unfortunately,
2804 * mount options form a comma-separated list,
2805 * but mpol's nodelist may also contain commas.
2806 */
2807 options = strchr(options, ',');
2808 if (options == NULL)
2809 break;
2810 options++;
2811 if (!isdigit(*options)) {
2812 options[-1] = '\0';
2813 break;
2814 }
2815 }
2816 if (!*this_char)
2817 continue;
2818 if ((value = strchr(this_char,'=')) != NULL) {
2819 *value++ = 0;
2820 } else {
2821 pr_err("tmpfs: No value for mount option '%s'\n",
2822 this_char);
2823 goto error;
2824 }
2825
2826 if (!strcmp(this_char,"size")) {
2827 unsigned long long size;
2828 size = memparse(value,&rest);
2829 if (*rest == '%') {
2830 size <<= PAGE_SHIFT;
2831 size *= totalram_pages;
2832 do_div(size, 100);
2833 rest++;
2834 }
2835 if (*rest)
2836 goto bad_val;
2837 sbinfo->max_blocks =
2838 DIV_ROUND_UP(size, PAGE_SIZE);
2839 } else if (!strcmp(this_char,"nr_blocks")) {
2840 sbinfo->max_blocks = memparse(value, &rest);
2841 if (*rest)
2842 goto bad_val;
2843 } else if (!strcmp(this_char,"nr_inodes")) {
2844 sbinfo->max_inodes = memparse(value, &rest);
2845 if (*rest)
2846 goto bad_val;
2847 } else if (!strcmp(this_char,"mode")) {
2848 if (remount)
2849 continue;
2850 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2851 if (*rest)
2852 goto bad_val;
2853 } else if (!strcmp(this_char,"uid")) {
2854 if (remount)
2855 continue;
2856 uid = simple_strtoul(value, &rest, 0);
2857 if (*rest)
2858 goto bad_val;
2859 sbinfo->uid = make_kuid(current_user_ns(), uid);
2860 if (!uid_valid(sbinfo->uid))
2861 goto bad_val;
2862 } else if (!strcmp(this_char,"gid")) {
2863 if (remount)
2864 continue;
2865 gid = simple_strtoul(value, &rest, 0);
2866 if (*rest)
2867 goto bad_val;
2868 sbinfo->gid = make_kgid(current_user_ns(), gid);
2869 if (!gid_valid(sbinfo->gid))
2870 goto bad_val;
2871 } else if (!strcmp(this_char,"mpol")) {
2872 mpol_put(mpol);
2873 mpol = NULL;
2874 if (mpol_parse_str(value, &mpol))
2875 goto bad_val;
2876 } else {
2877 pr_err("tmpfs: Bad mount option %s\n", this_char);
2878 goto error;
2879 }
2880 }
2881 sbinfo->mpol = mpol;
2882 return 0;
2883
2884bad_val:
2885 pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
2886 value, this_char);
2887error:
2888 mpol_put(mpol);
2889 return 1;
2890
2891}
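/*
 * Example of the mount options parsed above (illustrative command line only):
 *
 *	mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777,uid=1000,gid=1000 \
 *		tmpfs /mnt/scratch
 *
 * "size" also accepts a percentage of RAM, e.g. size=50%, and "mpol" takes a
 * mempolicy string such as mpol=interleave when NUMA policies are enabled.
 */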
2892
2893static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2894{
2895 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2896 struct shmem_sb_info config = *sbinfo;
2897 unsigned long inodes;
2898 int error = -EINVAL;
2899
2900 config.mpol = NULL;
2901 if (shmem_parse_options(data, &config, true))
2902 return error;
2903
2904 spin_lock(&sbinfo->stat_lock);
2905 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2906 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2907 goto out;
2908 if (config.max_inodes < inodes)
2909 goto out;
2910 /*
2911 * Those tests disallow limited->unlimited while any are in use;
2912 * but we must separately disallow unlimited->limited, because
2913 * in that case we have no record of how much is already in use.
2914 */
2915 if (config.max_blocks && !sbinfo->max_blocks)
2916 goto out;
2917 if (config.max_inodes && !sbinfo->max_inodes)
2918 goto out;
2919
2920 error = 0;
2921 sbinfo->max_blocks = config.max_blocks;
2922 sbinfo->max_inodes = config.max_inodes;
2923 sbinfo->free_inodes = config.max_inodes - inodes;
2924
2925 /*
2926 * Preserve previous mempolicy unless mpol remount option was specified.
2927 */
2928 if (config.mpol) {
2929 mpol_put(sbinfo->mpol);
2930 sbinfo->mpol = config.mpol; /* transfers initial ref */
2931 }
2932out:
2933 spin_unlock(&sbinfo->stat_lock);
2934 return error;
2935}
2936
2937static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2938{
2939 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2940
2941 if (sbinfo->max_blocks != shmem_default_max_blocks())
2942 seq_printf(seq, ",size=%luk",
2943 sbinfo->max_blocks << (PAGE_SHIFT - 10));
2944 if (sbinfo->max_inodes != shmem_default_max_inodes())
2945 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2946 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2947 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
2948 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
2949 seq_printf(seq, ",uid=%u",
2950 from_kuid_munged(&init_user_ns, sbinfo->uid));
2951 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
2952 seq_printf(seq, ",gid=%u",
2953 from_kgid_munged(&init_user_ns, sbinfo->gid));
2954 shmem_show_mpol(seq, sbinfo->mpol);
2955 return 0;
2956}
2957
2958#define MFD_NAME_PREFIX "memfd:"
2959#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
2960#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
2961
2962#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
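/*
 * With NAME_MAX at 255 and the "memfd:" prefix taking 6 characters, the
 * caller-supplied name below may be at most 249 bytes, excluding the
 * terminating NUL that strnlen_user() counts.
 */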
2963
2964SYSCALL_DEFINE2(memfd_create,
2965 const char __user *, uname,
2966 unsigned int, flags)
2967{
2968 struct shmem_inode_info *info;
2969 struct file *file;
2970 int fd, error;
2971 char *name;
2972 long len;
2973
2974 if (flags & ~(unsigned int)MFD_ALL_FLAGS)
2975 return -EINVAL;
2976
2977 /* length includes terminating zero */
2978 len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
2979 if (len <= 0)
2980 return -EFAULT;
2981 if (len > MFD_NAME_MAX_LEN + 1)
2982 return -EINVAL;
2983
2984 name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
2985 if (!name)
2986 return -ENOMEM;
2987
2988 strcpy(name, MFD_NAME_PREFIX);
2989 if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
2990 error = -EFAULT;
2991 goto err_name;
2992 }
2993
2994 /* terminating-zero may have changed after strnlen_user() returned */
2995 if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
2996 error = -EFAULT;
2997 goto err_name;
2998 }
2999
3000 fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
3001 if (fd < 0) {
3002 error = fd;
3003 goto err_name;
3004 }
3005
3006 file = shmem_file_setup(name, 0, VM_NORESERVE);
3007 if (IS_ERR(file)) {
3008 error = PTR_ERR(file);
3009 goto err_fd;
3010 }
3011 info = SHMEM_I(file_inode(file));
3012 file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
3013 file->f_flags |= O_RDWR | O_LARGEFILE;
3014 if (flags & MFD_ALLOW_SEALING)
3015 info->seals &= ~F_SEAL_SEAL;
3016
3017 fd_install(fd, file);
3018 kfree(name);
3019 return fd;
3020
3021err_fd:
3022 put_unused_fd(fd);
3023err_name:
3024 kfree(name);
3025 return error;
3026}
3027
3028#endif /* CONFIG_TMPFS */
3029
3030static void shmem_put_super(struct super_block *sb)
3031{
3032 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3033
3034 percpu_counter_destroy(&sbinfo->used_blocks);
3035 mpol_put(sbinfo->mpol);
3036 kfree(sbinfo);
3037 sb->s_fs_info = NULL;
3038}
3039
3040int shmem_fill_super(struct super_block *sb, void *data, int silent)
3041{
3042 struct inode *inode;
3043 struct shmem_sb_info *sbinfo;
3044 int err = -ENOMEM;
3045
3046 /* Round up to L1_CACHE_BYTES to resist false sharing */
3047 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3048 L1_CACHE_BYTES), GFP_KERNEL);
3049 if (!sbinfo)
3050 return -ENOMEM;
3051
3052 sbinfo->mode = S_IRWXUGO | S_ISVTX;
3053 sbinfo->uid = current_fsuid();
3054 sbinfo->gid = current_fsgid();
3055 sb->s_fs_info = sbinfo;
3056
3057#ifdef CONFIG_TMPFS
3058 /*
3059	 * By default we only allow half of the physical RAM per
3060 * tmpfs instance, limiting inodes to one per page of lowmem;
3061 * but the internal instance is left unlimited.
3062 */
3063 if (!(sb->s_flags & MS_KERNMOUNT)) {
3064 sbinfo->max_blocks = shmem_default_max_blocks();
3065 sbinfo->max_inodes = shmem_default_max_inodes();
3066 if (shmem_parse_options(data, sbinfo, false)) {
3067 err = -EINVAL;
3068 goto failed;
3069 }
3070 } else {
3071 sb->s_flags |= MS_NOUSER;
3072 }
3073 sb->s_export_op = &shmem_export_ops;
3074 sb->s_flags |= MS_NOSEC;
3075#else
3076 sb->s_flags |= MS_NOUSER;
3077#endif
3078
3079 spin_lock_init(&sbinfo->stat_lock);
3080 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3081 goto failed;
3082 sbinfo->free_inodes = sbinfo->max_inodes;
3083
3084 sb->s_maxbytes = MAX_LFS_FILESIZE;
3085 sb->s_blocksize = PAGE_SIZE;
3086 sb->s_blocksize_bits = PAGE_SHIFT;
3087 sb->s_magic = TMPFS_MAGIC;
3088 sb->s_op = &shmem_ops;
3089 sb->s_time_gran = 1;
3090#ifdef CONFIG_TMPFS_XATTR
3091 sb->s_xattr = shmem_xattr_handlers;
3092#endif
3093#ifdef CONFIG_TMPFS_POSIX_ACL
3094 sb->s_flags |= MS_POSIXACL;
3095#endif
3096
3097 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3098 if (!inode)
3099 goto failed;
3100 inode->i_uid = sbinfo->uid;
3101 inode->i_gid = sbinfo->gid;
3102 sb->s_root = d_make_root(inode);
3103 if (!sb->s_root)
3104 goto failed;
3105 return 0;
3106
3107failed:
3108 shmem_put_super(sb);
3109 return err;
3110}
3111
3112static struct kmem_cache *shmem_inode_cachep;
3113
3114static struct inode *shmem_alloc_inode(struct super_block *sb)
3115{
3116 struct shmem_inode_info *info;
3117 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3118 if (!info)
3119 return NULL;
3120 return &info->vfs_inode;
3121}
3122
3123static void shmem_destroy_callback(struct rcu_head *head)
3124{
3125 struct inode *inode = container_of(head, struct inode, i_rcu);
3126 kfree(inode->i_link);
3127 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3128}
3129
3130static void shmem_destroy_inode(struct inode *inode)
3131{
3132 if (S_ISREG(inode->i_mode))
3133 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3134 call_rcu(&inode->i_rcu, shmem_destroy_callback);
3135}
3136
3137static void shmem_init_inode(void *foo)
3138{
3139 struct shmem_inode_info *info = foo;
3140 inode_init_once(&info->vfs_inode);
3141}
3142
3143static int shmem_init_inodecache(void)
3144{
3145 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3146 sizeof(struct shmem_inode_info),
3147 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3148 return 0;
3149}
3150
3151static void shmem_destroy_inodecache(void)
3152{
3153 kmem_cache_destroy(shmem_inode_cachep);
3154}
3155
3156static const struct address_space_operations shmem_aops = {
3157 .writepage = shmem_writepage,
3158 .set_page_dirty = __set_page_dirty_no_writeback,
3159#ifdef CONFIG_TMPFS
3160 .write_begin = shmem_write_begin,
3161 .write_end = shmem_write_end,
3162#endif
3163#ifdef CONFIG_MIGRATION
3164 .migratepage = migrate_page,
3165#endif
3166 .error_remove_page = generic_error_remove_page,
3167};
3168
3169static const struct file_operations shmem_file_operations = {
3170 .mmap = shmem_mmap,
3171#ifdef CONFIG_TMPFS
3172 .llseek = shmem_file_llseek,
3173 .read_iter = shmem_file_read_iter,
3174 .write_iter = generic_file_write_iter,
3175 .fsync = noop_fsync,
3176 .splice_read = shmem_file_splice_read,
3177 .splice_write = iter_file_splice_write,
3178 .fallocate = shmem_fallocate,
3179#endif
3180};
3181
3182static const struct inode_operations shmem_inode_operations = {
3183 .getattr = shmem_getattr,
3184 .setattr = shmem_setattr,
3185#ifdef CONFIG_TMPFS_XATTR
3186 .setxattr = generic_setxattr,
3187 .getxattr = generic_getxattr,
3188 .listxattr = shmem_listxattr,
3189 .removexattr = generic_removexattr,
3190 .set_acl = simple_set_acl,
3191#endif
3192};
3193
3194static const struct inode_operations shmem_dir_inode_operations = {
3195#ifdef CONFIG_TMPFS
3196 .create = shmem_create,
3197 .lookup = simple_lookup,
3198 .link = shmem_link,
3199 .unlink = shmem_unlink,
3200 .symlink = shmem_symlink,
3201 .mkdir = shmem_mkdir,
3202 .rmdir = shmem_rmdir,
3203 .mknod = shmem_mknod,
3204 .rename2 = shmem_rename2,
3205 .tmpfile = shmem_tmpfile,
3206#endif
3207#ifdef CONFIG_TMPFS_XATTR
3208 .setxattr = generic_setxattr,
3209 .getxattr = generic_getxattr,
3210 .listxattr = shmem_listxattr,
3211 .removexattr = generic_removexattr,
3212#endif
3213#ifdef CONFIG_TMPFS_POSIX_ACL
3214 .setattr = shmem_setattr,
3215 .set_acl = simple_set_acl,
3216#endif
3217};
3218
3219static const struct inode_operations shmem_special_inode_operations = {
3220#ifdef CONFIG_TMPFS_XATTR
3221 .setxattr = generic_setxattr,
3222 .getxattr = generic_getxattr,
3223 .listxattr = shmem_listxattr,
3224 .removexattr = generic_removexattr,
3225#endif
3226#ifdef CONFIG_TMPFS_POSIX_ACL
3227 .setattr = shmem_setattr,
3228 .set_acl = simple_set_acl,
3229#endif
3230};
3231
3232static const struct super_operations shmem_ops = {
3233 .alloc_inode = shmem_alloc_inode,
3234 .destroy_inode = shmem_destroy_inode,
3235#ifdef CONFIG_TMPFS
3236 .statfs = shmem_statfs,
3237 .remount_fs = shmem_remount_fs,
3238 .show_options = shmem_show_options,
3239#endif
3240 .evict_inode = shmem_evict_inode,
3241 .drop_inode = generic_delete_inode,
3242 .put_super = shmem_put_super,
3243};
3244
3245static const struct vm_operations_struct shmem_vm_ops = {
3246 .fault = shmem_fault,
3247 .map_pages = filemap_map_pages,
3248#ifdef CONFIG_NUMA
3249 .set_policy = shmem_set_policy,
3250 .get_policy = shmem_get_policy,
3251#endif
3252};
3253
3254static struct dentry *shmem_mount(struct file_system_type *fs_type,
3255 int flags, const char *dev_name, void *data)
3256{
3257 return mount_nodev(fs_type, flags, data, shmem_fill_super);
3258}
3259
3260static struct file_system_type shmem_fs_type = {
3261 .owner = THIS_MODULE,
3262 .name = "tmpfs",
3263 .mount = shmem_mount,
3264 .kill_sb = kill_litter_super,
3265 .fs_flags = FS_USERNS_MOUNT,
3266};
3267
3268int __init shmem_init(void)
3269{
3270 int error;
3271
3272 /* If rootfs called this, don't re-init */
3273 if (shmem_inode_cachep)
3274 return 0;
3275
3276 error = shmem_init_inodecache();
3277 if (error)
3278 goto out3;
3279
3280 error = register_filesystem(&shmem_fs_type);
3281 if (error) {
3282 pr_err("Could not register tmpfs\n");
3283 goto out2;
3284 }
3285
3286 shm_mnt = kern_mount(&shmem_fs_type);
3287 if (IS_ERR(shm_mnt)) {
3288 error = PTR_ERR(shm_mnt);
3289 pr_err("Could not kern_mount tmpfs\n");
3290 goto out1;
3291 }
3292 return 0;
3293
3294out1:
3295 unregister_filesystem(&shmem_fs_type);
3296out2:
3297 shmem_destroy_inodecache();
3298out3:
3299 shm_mnt = ERR_PTR(error);
3300 return error;
3301}
3302
3303#else /* !CONFIG_SHMEM */
3304
3305/*
3306 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3307 *
3308 * This is intended for small systems where the benefits of the full
3309 * shmem code (swap-backed and resource-limited) are outweighed by
3310 * its complexity. On systems without swap this code should be
3311 * effectively equivalent, but much lighter weight.
3312 */
3313
3314static struct file_system_type shmem_fs_type = {
3315 .name = "tmpfs",
3316 .mount = ramfs_mount,
3317 .kill_sb = kill_litter_super,
3318 .fs_flags = FS_USERNS_MOUNT,
3319};
3320
3321int __init shmem_init(void)
3322{
3323 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3324
3325 shm_mnt = kern_mount(&shmem_fs_type);
3326 BUG_ON(IS_ERR(shm_mnt));
3327
3328 return 0;
3329}
3330
3331int shmem_unuse(swp_entry_t swap, struct page *page)
3332{
3333 return 0;
3334}
3335
3336int shmem_lock(struct file *file, int lock, struct user_struct *user)
3337{
3338 return 0;
3339}
3340
3341void shmem_unlock_mapping(struct address_space *mapping)
3342{
3343}
3344
3345void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
3346{
3347 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
3348}
3349EXPORT_SYMBOL_GPL(shmem_truncate_range);
3350
3351#define shmem_vm_ops generic_file_vm_ops
3352#define shmem_file_operations ramfs_file_operations
3353#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
3354#define shmem_acct_size(flags, size) 0
3355#define shmem_unacct_size(flags, size) do {} while (0)
3356
3357#endif /* CONFIG_SHMEM */
3358
3359/* common code */
3360
3361static struct dentry_operations anon_ops = {
3362 .d_dname = simple_dname
3363};
3364
3365static struct file *__shmem_file_setup(const char *name, loff_t size,
3366 unsigned long flags, unsigned int i_flags)
3367{
3368 struct file *res;
3369 struct inode *inode;
3370 struct path path;
3371 struct super_block *sb;
3372 struct qstr this;
3373
3374 if (IS_ERR(shm_mnt))
3375 return ERR_CAST(shm_mnt);
3376
3377 if (size < 0 || size > MAX_LFS_FILESIZE)
3378 return ERR_PTR(-EINVAL);
3379
3380 if (shmem_acct_size(flags, size))
3381 return ERR_PTR(-ENOMEM);
3382
3383 res = ERR_PTR(-ENOMEM);
3384 this.name = name;
3385 this.len = strlen(name);
3386 this.hash = 0; /* will go */
3387 sb = shm_mnt->mnt_sb;
3388 path.mnt = mntget(shm_mnt);
3389 path.dentry = d_alloc_pseudo(sb, &this);
3390 if (!path.dentry)
3391 goto put_memory;
3392 d_set_d_op(path.dentry, &anon_ops);
3393
3394 res = ERR_PTR(-ENOSPC);
3395 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3396 if (!inode)
3397 goto put_memory;
3398
3399 inode->i_flags |= i_flags;
3400 d_instantiate(path.dentry, inode);
3401 inode->i_size = size;
3402 clear_nlink(inode); /* It is unlinked */
3403 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
3404 if (IS_ERR(res))
3405 goto put_path;
3406
3407 res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3408 &shmem_file_operations);
3409 if (IS_ERR(res))
3410 goto put_path;
3411
3412 return res;
3413
3414put_memory:
3415 shmem_unacct_size(flags, size);
3416put_path:
3417 path_put(&path);
3418 return res;
3419}
3420
3421/**
3422 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3423 * kernel internal. There will be NO LSM permission checks against the
3424 * underlying inode. So users of this interface must do LSM checks at a
3425 * higher layer. The users are the big_key and shm implementations. LSM
3426 * checks are provided at the key or shm level rather than the inode.
3427 * @name: name for dentry (to be seen in /proc/<pid>/maps)
3428 * @size: size to be set for the file
3429 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3430 */
3431struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
3432{
3433 return __shmem_file_setup(name, size, flags, S_PRIVATE);
3434}
3435
3436/**
3437 * shmem_file_setup - get an unlinked file living in tmpfs
3438 * @name: name for dentry (to be seen in /proc/<pid>/maps)
3439 * @size: size to be set for the file
3440 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3441 */
3442struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3443{
3444 return __shmem_file_setup(name, size, flags, 0);
3445}
3446EXPORT_SYMBOL_GPL(shmem_file_setup);
3447
3448/**
3449 * shmem_zero_setup - setup a shared anonymous mapping
3450 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
3451 */
3452int shmem_zero_setup(struct vm_area_struct *vma)
3453{
3454 struct file *file;
3455 loff_t size = vma->vm_end - vma->vm_start;
3456
3457 /*
3458 * Cloning a new file under mmap_sem leads to a lock ordering conflict
3459 * between XFS directory reading and selinux: since this file is only
3460 * accessible to the user through its mapping, use S_PRIVATE flag to
3461 * bypass file security, in the same way as shmem_kernel_file_setup().
3462 */
3463 file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
3464 if (IS_ERR(file))
3465 return PTR_ERR(file);
3466
3467 if (vma->vm_file)
3468 fput(vma->vm_file);
3469 vma->vm_file = file;
3470 vma->vm_ops = &shmem_vm_ops;
3471 return 0;
3472}
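/*
 * For reference (illustrative only): shmem_zero_setup() is what ends up
 * backing a shared anonymous mapping, e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * so such mappings get tmpfs semantics (swap-backed, shared with fork
 * children) rather than the private zero-fill path.
 */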
3473
3474/**
3475 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3476 * @mapping: the page's address_space
3477 * @index: the page index
3478 * @gfp: the page allocator flags to use if allocating
3479 *
3480 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3481 * with any new page allocations done using the specified allocation flags.
3482 * But read_cache_page_gfp() uses the ->readpage() method: which does not
3483 * suit tmpfs, since it may have pages in swapcache, and needs to find those
3484 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
3485 *
3486 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3487 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
3488 */
3489struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3490 pgoff_t index, gfp_t gfp)
3491{
3492#ifdef CONFIG_SHMEM
3493 struct inode *inode = mapping->host;
3494 struct page *page;
3495 int error;
3496
3497 BUG_ON(mapping->a_ops != &shmem_aops);
3498 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
3499 if (error)
3500 page = ERR_PTR(error);
3501 else
3502 unlock_page(page);
3503 return page;
3504#else
3505 /*
3506 * The tiny !SHMEM case uses ramfs without swap
3507 */
3508 return read_cache_page_gfp(mapping, index, gfp);
3509#endif
3510}
3511EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
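/*
 * Caller sketch, modelled on the i915 usage mentioned in the comment above
 * (illustrative only; not a verbatim copy of any driver; "i" stands for the
 * page index being fetched):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	if (IS_ERR(page))
 *		... fall back to a less aggressive allocation or fail ...
 */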