Loading...
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/ramfs.h>
29#include <linux/pagemap.h>
30#include <linux/file.h>
31#include <linux/fileattr.h>
32#include <linux/mm.h>
33#include <linux/random.h>
34#include <linux/sched/signal.h>
35#include <linux/export.h>
36#include <linux/shmem_fs.h>
37#include <linux/swap.h>
38#include <linux/uio.h>
39#include <linux/hugetlb.h>
40#include <linux/fs_parser.h>
41#include <linux/swapfile.h>
42#include <linux/iversion.h>
43#include <linux/unicode.h>
44#include "swap.h"
45
46static struct vfsmount *shm_mnt __ro_after_init;
47
48#ifdef CONFIG_SHMEM
49/*
50 * This virtual memory filesystem is heavily based on the ramfs. It
51 * extends ramfs by the ability to use swap and honor resource limits
52 * which makes it a completely usable filesystem.
53 */
54
55#include <linux/xattr.h>
56#include <linux/exportfs.h>
57#include <linux/posix_acl.h>
58#include <linux/posix_acl_xattr.h>
59#include <linux/mman.h>
60#include <linux/string.h>
61#include <linux/slab.h>
62#include <linux/backing-dev.h>
63#include <linux/writeback.h>
64#include <linux/pagevec.h>
65#include <linux/percpu_counter.h>
66#include <linux/falloc.h>
67#include <linux/splice.h>
68#include <linux/security.h>
69#include <linux/swapops.h>
70#include <linux/mempolicy.h>
71#include <linux/namei.h>
72#include <linux/ctype.h>
73#include <linux/migrate.h>
74#include <linux/highmem.h>
75#include <linux/seq_file.h>
76#include <linux/magic.h>
77#include <linux/syscalls.h>
78#include <linux/fcntl.h>
79#include <uapi/linux/memfd.h>
80#include <linux/rmap.h>
81#include <linux/uuid.h>
82#include <linux/quotaops.h>
83#include <linux/rcupdate_wait.h>
84
85#include <linux/uaccess.h>
86
87#include "internal.h"
88
89#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
90#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
91
92/* Pretend that each entry is of this size in directory's i_size */
93#define BOGO_DIRENT_SIZE 20
94
95/* Pretend that one inode + its dentry occupy this much memory */
96#define BOGO_INODE_SIZE 1024
97
98/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99#define SHORT_SYMLINK_LEN 128
100
101/*
102 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
103 * inode->i_private (with i_rwsem making sure that it has only one user at
104 * a time): we would prefer not to enlarge the shmem inode just for that.
105 */
106struct shmem_falloc {
107 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
108 pgoff_t start; /* start of range currently being fallocated */
109 pgoff_t next; /* the next page offset to be fallocated */
110 pgoff_t nr_falloced; /* how many new pages have been fallocated */
111 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
112};
113
114struct shmem_options {
115 unsigned long long blocks;
116 unsigned long long inodes;
117 struct mempolicy *mpol;
118 kuid_t uid;
119 kgid_t gid;
120 umode_t mode;
121 bool full_inums;
122 int huge;
123 int seen;
124 bool noswap;
125 unsigned short quota_types;
126 struct shmem_quota_limits qlimits;
127#if IS_ENABLED(CONFIG_UNICODE)
128 struct unicode_map *encoding;
129 bool strict_encoding;
130#endif
131#define SHMEM_SEEN_BLOCKS 1
132#define SHMEM_SEEN_INODES 2
133#define SHMEM_SEEN_HUGE 4
134#define SHMEM_SEEN_INUMS 8
135#define SHMEM_SEEN_NOSWAP 16
136#define SHMEM_SEEN_QUOTA 32
137};
138
139#ifdef CONFIG_TRANSPARENT_HUGEPAGE
140static unsigned long huge_shmem_orders_always __read_mostly;
141static unsigned long huge_shmem_orders_madvise __read_mostly;
142static unsigned long huge_shmem_orders_inherit __read_mostly;
143static unsigned long huge_shmem_orders_within_size __read_mostly;
144static bool shmem_orders_configured __initdata;
145#endif
146
147#ifdef CONFIG_TMPFS
148static unsigned long shmem_default_max_blocks(void)
149{
150 return totalram_pages() / 2;
151}
152
153static unsigned long shmem_default_max_inodes(void)
154{
155 unsigned long nr_pages = totalram_pages();
156
157 return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
158 ULONG_MAX / BOGO_INODE_SIZE);
159}
160#endif
161
162static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
163 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
164 struct vm_area_struct *vma, vm_fault_t *fault_type);
165
166static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
167{
168 return sb->s_fs_info;
169}
170
171/*
172 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
173 * for shared memory and for shared anonymous (/dev/zero) mappings
174 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
175 * consistent with the pre-accounting of private mappings ...
176 */
177static inline int shmem_acct_size(unsigned long flags, loff_t size)
178{
179 return (flags & VM_NORESERVE) ?
180 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
181}
182
183static inline void shmem_unacct_size(unsigned long flags, loff_t size)
184{
185 if (!(flags & VM_NORESERVE))
186 vm_unacct_memory(VM_ACCT(size));
187}
188
189static inline int shmem_reacct_size(unsigned long flags,
190 loff_t oldsize, loff_t newsize)
191{
192 if (!(flags & VM_NORESERVE)) {
193 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
194 return security_vm_enough_memory_mm(current->mm,
195 VM_ACCT(newsize) - VM_ACCT(oldsize));
196 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
197 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
198 }
199 return 0;
200}
201
202/*
203 * ... whereas tmpfs objects are accounted incrementally as
204 * pages are allocated, in order to allow large sparse files.
205 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
206 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
207 */
208static inline int shmem_acct_blocks(unsigned long flags, long pages)
209{
210 if (!(flags & VM_NORESERVE))
211 return 0;
212
213 return security_vm_enough_memory_mm(current->mm,
214 pages * VM_ACCT(PAGE_SIZE));
215}
216
217static inline void shmem_unacct_blocks(unsigned long flags, long pages)
218{
219 if (flags & VM_NORESERVE)
220 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
221}
222
223static int shmem_inode_acct_blocks(struct inode *inode, long pages)
224{
225 struct shmem_inode_info *info = SHMEM_I(inode);
226 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
227 int err = -ENOSPC;
228
229 if (shmem_acct_blocks(info->flags, pages))
230 return err;
231
232 might_sleep(); /* when quotas */
233 if (sbinfo->max_blocks) {
234 if (!percpu_counter_limited_add(&sbinfo->used_blocks,
235 sbinfo->max_blocks, pages))
236 goto unacct;
237
238 err = dquot_alloc_block_nodirty(inode, pages);
239 if (err) {
240 percpu_counter_sub(&sbinfo->used_blocks, pages);
241 goto unacct;
242 }
243 } else {
244 err = dquot_alloc_block_nodirty(inode, pages);
245 if (err)
246 goto unacct;
247 }
248
249 return 0;
250
251unacct:
252 shmem_unacct_blocks(info->flags, pages);
253 return err;
254}
255
256static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
257{
258 struct shmem_inode_info *info = SHMEM_I(inode);
259 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
260
261 might_sleep(); /* when quotas */
262 dquot_free_block_nodirty(inode, pages);
263
264 if (sbinfo->max_blocks)
265 percpu_counter_sub(&sbinfo->used_blocks, pages);
266 shmem_unacct_blocks(info->flags, pages);
267}
268
269static const struct super_operations shmem_ops;
270static const struct address_space_operations shmem_aops;
271static const struct file_operations shmem_file_operations;
272static const struct inode_operations shmem_inode_operations;
273static const struct inode_operations shmem_dir_inode_operations;
274static const struct inode_operations shmem_special_inode_operations;
275static const struct vm_operations_struct shmem_vm_ops;
276static const struct vm_operations_struct shmem_anon_vm_ops;
277static struct file_system_type shmem_fs_type;
278
279bool shmem_mapping(struct address_space *mapping)
280{
281 return mapping->a_ops == &shmem_aops;
282}
283EXPORT_SYMBOL_GPL(shmem_mapping);
284
285bool vma_is_anon_shmem(struct vm_area_struct *vma)
286{
287 return vma->vm_ops == &shmem_anon_vm_ops;
288}
289
290bool vma_is_shmem(struct vm_area_struct *vma)
291{
292 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
293}
294
295static LIST_HEAD(shmem_swaplist);
296static DEFINE_MUTEX(shmem_swaplist_mutex);
297
298#ifdef CONFIG_TMPFS_QUOTA
299
300static int shmem_enable_quotas(struct super_block *sb,
301 unsigned short quota_types)
302{
303 int type, err = 0;
304
305 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
306 for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
307 if (!(quota_types & (1 << type)))
308 continue;
309 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
310 DQUOT_USAGE_ENABLED |
311 DQUOT_LIMITS_ENABLED);
312 if (err)
313 goto out_err;
314 }
315 return 0;
316
317out_err:
318 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
319 type, err);
320 for (type--; type >= 0; type--)
321 dquot_quota_off(sb, type);
322 return err;
323}
324
325static void shmem_disable_quotas(struct super_block *sb)
326{
327 int type;
328
329 for (type = 0; type < SHMEM_MAXQUOTAS; type++)
330 dquot_quota_off(sb, type);
331}
332
333static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
334{
335 return SHMEM_I(inode)->i_dquot;
336}
337#endif /* CONFIG_TMPFS_QUOTA */
338
339/*
340 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
341 * produces a novel ino for the newly allocated inode.
342 *
343 * It may also be called when making a hard link to permit the space needed by
344 * each dentry. However, in that case, no new inode number is needed since that
345 * internally draws from another pool of inode numbers (currently global
346 * get_next_ino()). This case is indicated by passing NULL as inop.
347 */
348#define SHMEM_INO_BATCH 1024
349static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
350{
351 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
352 ino_t ino;
353
354 if (!(sb->s_flags & SB_KERNMOUNT)) {
355 raw_spin_lock(&sbinfo->stat_lock);
356 if (sbinfo->max_inodes) {
357 if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
358 raw_spin_unlock(&sbinfo->stat_lock);
359 return -ENOSPC;
360 }
361 sbinfo->free_ispace -= BOGO_INODE_SIZE;
362 }
363 if (inop) {
364 ino = sbinfo->next_ino++;
365 if (unlikely(is_zero_ino(ino)))
366 ino = sbinfo->next_ino++;
367 if (unlikely(!sbinfo->full_inums &&
368 ino > UINT_MAX)) {
369 /*
370 * Emulate get_next_ino uint wraparound for
371 * compatibility
372 */
373 if (IS_ENABLED(CONFIG_64BIT))
374 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
375 __func__, MINOR(sb->s_dev));
376 sbinfo->next_ino = 1;
377 ino = sbinfo->next_ino++;
378 }
379 *inop = ino;
380 }
381 raw_spin_unlock(&sbinfo->stat_lock);
382 } else if (inop) {
383 /*
384 * __shmem_file_setup, one of our callers, is lock-free: it
385 * doesn't hold stat_lock in shmem_reserve_inode since
386 * max_inodes is always 0, and is called from potentially
387 * unknown contexts. As such, use a per-cpu batched allocator
388 * which doesn't require the per-sb stat_lock unless we are at
389 * the batch boundary.
390 *
391 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
392 * shmem mounts are not exposed to userspace, so we don't need
393 * to worry about things like glibc compatibility.
394 */
395 ino_t *next_ino;
396
397 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
398 ino = *next_ino;
399 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
400 raw_spin_lock(&sbinfo->stat_lock);
401 ino = sbinfo->next_ino;
402 sbinfo->next_ino += SHMEM_INO_BATCH;
403 raw_spin_unlock(&sbinfo->stat_lock);
404 if (unlikely(is_zero_ino(ino)))
405 ino++;
406 }
407 *inop = ino;
408 *next_ino = ++ino;
409 put_cpu();
410 }
411
412 return 0;
413}
414
415static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
416{
417 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
418 if (sbinfo->max_inodes) {
419 raw_spin_lock(&sbinfo->stat_lock);
420 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
421 raw_spin_unlock(&sbinfo->stat_lock);
422 }
423}
424
425/**
426 * shmem_recalc_inode - recalculate the block usage of an inode
427 * @inode: inode to recalc
428 * @alloced: the change in number of pages allocated to inode
429 * @swapped: the change in number of pages swapped from inode
430 *
431 * We have to calculate the free blocks since the mm can drop
432 * undirtied hole pages behind our back.
433 *
434 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
435 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
436 */
437static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
438{
439 struct shmem_inode_info *info = SHMEM_I(inode);
440 long freed;
441
442 spin_lock(&info->lock);
443 info->alloced += alloced;
444 info->swapped += swapped;
445 freed = info->alloced - info->swapped -
446 READ_ONCE(inode->i_mapping->nrpages);
447 /*
448 * Special case: whereas normally shmem_recalc_inode() is called
449 * after i_mapping->nrpages has already been adjusted (up or down),
450 * shmem_writepage() has to raise swapped before nrpages is lowered -
451 * to stop a racing shmem_recalc_inode() from thinking that a page has
452 * been freed. Compensate here, to avoid the need for a followup call.
453 */
454 if (swapped > 0)
455 freed += swapped;
456 if (freed > 0)
457 info->alloced -= freed;
458 spin_unlock(&info->lock);
459
460 /* The quota case may block */
461 if (freed > 0)
462 shmem_inode_unacct_blocks(inode, freed);
463}
464
465bool shmem_charge(struct inode *inode, long pages)
466{
467 struct address_space *mapping = inode->i_mapping;
468
469 if (shmem_inode_acct_blocks(inode, pages))
470 return false;
471
472 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
473 xa_lock_irq(&mapping->i_pages);
474 mapping->nrpages += pages;
475 xa_unlock_irq(&mapping->i_pages);
476
477 shmem_recalc_inode(inode, pages, 0);
478 return true;
479}
480
481void shmem_uncharge(struct inode *inode, long pages)
482{
483 /* pages argument is currently unused: keep it to help debugging */
484 /* nrpages adjustment done by __filemap_remove_folio() or caller */
485
486 shmem_recalc_inode(inode, 0, 0);
487}
488
489/*
490 * Replace item expected in xarray by a new item, while holding xa_lock.
491 */
492static int shmem_replace_entry(struct address_space *mapping,
493 pgoff_t index, void *expected, void *replacement)
494{
495 XA_STATE(xas, &mapping->i_pages, index);
496 void *item;
497
498 VM_BUG_ON(!expected);
499 VM_BUG_ON(!replacement);
500 item = xas_load(&xas);
501 if (item != expected)
502 return -ENOENT;
503 xas_store(&xas, replacement);
504 return 0;
505}
506
507/*
508 * Sometimes, before we decide whether to proceed or to fail, we must check
509 * that an entry was not already brought back from swap by a racing thread.
510 *
511 * Checking folio is not enough: by the time a swapcache folio is locked, it
512 * might be reused, and again be swapcache, using the same swap as before.
513 */
514static bool shmem_confirm_swap(struct address_space *mapping,
515 pgoff_t index, swp_entry_t swap)
516{
517 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
518}
519
520/*
521 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
522 *
523 * SHMEM_HUGE_NEVER:
524 * disables huge pages for the mount;
525 * SHMEM_HUGE_ALWAYS:
526 * enables huge pages for the mount;
527 * SHMEM_HUGE_WITHIN_SIZE:
528 * only allocate huge pages if the page will be fully within i_size,
529 * also respect fadvise()/madvise() hints;
530 * SHMEM_HUGE_ADVISE:
531 * only allocate huge pages if requested with fadvise()/madvise();
532 */
533
534#define SHMEM_HUGE_NEVER 0
535#define SHMEM_HUGE_ALWAYS 1
536#define SHMEM_HUGE_WITHIN_SIZE 2
537#define SHMEM_HUGE_ADVISE 3
538
539/*
540 * Special values.
541 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
542 *
543 * SHMEM_HUGE_DENY:
544 * disables huge on shm_mnt and all mounts, for emergency use;
545 * SHMEM_HUGE_FORCE:
546 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
547 *
548 */
549#define SHMEM_HUGE_DENY (-1)
550#define SHMEM_HUGE_FORCE (-2)
551
552#ifdef CONFIG_TRANSPARENT_HUGEPAGE
553/* ifdef here to avoid bloating shmem.o when not necessary */
554
555static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
556
557static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
558 loff_t write_end, bool shmem_huge_force,
559 unsigned long vm_flags)
560{
561 loff_t i_size;
562
563 if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
564 return false;
565 if (!S_ISREG(inode->i_mode))
566 return false;
567 if (shmem_huge == SHMEM_HUGE_DENY)
568 return false;
569 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
570 return true;
571
572 switch (SHMEM_SB(inode->i_sb)->huge) {
573 case SHMEM_HUGE_ALWAYS:
574 return true;
575 case SHMEM_HUGE_WITHIN_SIZE:
576 index = round_up(index + 1, HPAGE_PMD_NR);
577 i_size = max(write_end, i_size_read(inode));
578 i_size = round_up(i_size, PAGE_SIZE);
579 if (i_size >> PAGE_SHIFT >= index)
580 return true;
581 fallthrough;
582 case SHMEM_HUGE_ADVISE:
583 if (vm_flags & VM_HUGEPAGE)
584 return true;
585 fallthrough;
586 default:
587 return false;
588 }
589}
590
591static int shmem_parse_huge(const char *str)
592{
593 int huge;
594
595 if (!str)
596 return -EINVAL;
597
598 if (!strcmp(str, "never"))
599 huge = SHMEM_HUGE_NEVER;
600 else if (!strcmp(str, "always"))
601 huge = SHMEM_HUGE_ALWAYS;
602 else if (!strcmp(str, "within_size"))
603 huge = SHMEM_HUGE_WITHIN_SIZE;
604 else if (!strcmp(str, "advise"))
605 huge = SHMEM_HUGE_ADVISE;
606 else if (!strcmp(str, "deny"))
607 huge = SHMEM_HUGE_DENY;
608 else if (!strcmp(str, "force"))
609 huge = SHMEM_HUGE_FORCE;
610 else
611 return -EINVAL;
612
613 if (!has_transparent_hugepage() &&
614 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
615 return -EINVAL;
616
617 /* Do not override huge allocation policy with non-PMD sized mTHP */
618 if (huge == SHMEM_HUGE_FORCE &&
619 huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
620 return -EINVAL;
621
622 return huge;
623}
624
625#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
626static const char *shmem_format_huge(int huge)
627{
628 switch (huge) {
629 case SHMEM_HUGE_NEVER:
630 return "never";
631 case SHMEM_HUGE_ALWAYS:
632 return "always";
633 case SHMEM_HUGE_WITHIN_SIZE:
634 return "within_size";
635 case SHMEM_HUGE_ADVISE:
636 return "advise";
637 case SHMEM_HUGE_DENY:
638 return "deny";
639 case SHMEM_HUGE_FORCE:
640 return "force";
641 default:
642 VM_BUG_ON(1);
643 return "bad_val";
644 }
645}
646#endif
647
648static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
649 struct shrink_control *sc, unsigned long nr_to_free)
650{
651 LIST_HEAD(list), *pos, *next;
652 struct inode *inode;
653 struct shmem_inode_info *info;
654 struct folio *folio;
655 unsigned long batch = sc ? sc->nr_to_scan : 128;
656 unsigned long split = 0, freed = 0;
657
658 if (list_empty(&sbinfo->shrinklist))
659 return SHRINK_STOP;
660
661 spin_lock(&sbinfo->shrinklist_lock);
662 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
663 info = list_entry(pos, struct shmem_inode_info, shrinklist);
664
665 /* pin the inode */
666 inode = igrab(&info->vfs_inode);
667
668 /* inode is about to be evicted */
669 if (!inode) {
670 list_del_init(&info->shrinklist);
671 goto next;
672 }
673
674 list_move(&info->shrinklist, &list);
675next:
676 sbinfo->shrinklist_len--;
677 if (!--batch)
678 break;
679 }
680 spin_unlock(&sbinfo->shrinklist_lock);
681
682 list_for_each_safe(pos, next, &list) {
683 pgoff_t next, end;
684 loff_t i_size;
685 int ret;
686
687 info = list_entry(pos, struct shmem_inode_info, shrinklist);
688 inode = &info->vfs_inode;
689
690 if (nr_to_free && freed >= nr_to_free)
691 goto move_back;
692
693 i_size = i_size_read(inode);
694 folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
695 if (!folio || xa_is_value(folio))
696 goto drop;
697
698 /* No large folio at the end of the file: nothing to split */
699 if (!folio_test_large(folio)) {
700 folio_put(folio);
701 goto drop;
702 }
703
704 /* Check if there is anything to gain from splitting */
705 next = folio_next_index(folio);
706 end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
707 if (end <= folio->index || end >= next) {
708 folio_put(folio);
709 goto drop;
710 }
711
712 /*
713 * Move the inode on the list back to shrinklist if we failed
714 * to lock the page at this time.
715 *
716 * Waiting for the lock may lead to deadlock in the
717 * reclaim path.
718 */
719 if (!folio_trylock(folio)) {
720 folio_put(folio);
721 goto move_back;
722 }
723
724 ret = split_folio(folio);
725 folio_unlock(folio);
726 folio_put(folio);
727
728 /* If split failed move the inode on the list back to shrinklist */
729 if (ret)
730 goto move_back;
731
732 freed += next - end;
733 split++;
734drop:
735 list_del_init(&info->shrinklist);
736 goto put;
737move_back:
738 /*
739 * Make sure the inode is either on the global list or deleted
740 * from any local list before iput() since it could be deleted
741 * in another thread once we put the inode (then the local list
742 * is corrupted).
743 */
744 spin_lock(&sbinfo->shrinklist_lock);
745 list_move(&info->shrinklist, &sbinfo->shrinklist);
746 sbinfo->shrinklist_len++;
747 spin_unlock(&sbinfo->shrinklist_lock);
748put:
749 iput(inode);
750 }
751
752 return split;
753}
754
755static long shmem_unused_huge_scan(struct super_block *sb,
756 struct shrink_control *sc)
757{
758 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
759
760 if (!READ_ONCE(sbinfo->shrinklist_len))
761 return SHRINK_STOP;
762
763 return shmem_unused_huge_shrink(sbinfo, sc, 0);
764}
765
766static long shmem_unused_huge_count(struct super_block *sb,
767 struct shrink_control *sc)
768{
769 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
770 return READ_ONCE(sbinfo->shrinklist_len);
771}
772#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
773
774#define shmem_huge SHMEM_HUGE_DENY
775
776static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
777 struct shrink_control *sc, unsigned long nr_to_free)
778{
779 return 0;
780}
781
782static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
783 loff_t write_end, bool shmem_huge_force,
784 unsigned long vm_flags)
785{
786 return false;
787}
788#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
789
790static void shmem_update_stats(struct folio *folio, int nr_pages)
791{
792 if (folio_test_pmd_mappable(folio))
793 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
794 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
795 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
796}
797
798/*
799 * Somewhat like filemap_add_folio, but error if expected item has gone.
800 */
801static int shmem_add_to_page_cache(struct folio *folio,
802 struct address_space *mapping,
803 pgoff_t index, void *expected, gfp_t gfp)
804{
805 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
806 long nr = folio_nr_pages(folio);
807
808 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
809 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
810 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
811
812 folio_ref_add(folio, nr);
813 folio->mapping = mapping;
814 folio->index = index;
815
816 gfp &= GFP_RECLAIM_MASK;
817 folio_throttle_swaprate(folio, gfp);
818
819 do {
820 xas_lock_irq(&xas);
821 if (expected != xas_find_conflict(&xas)) {
822 xas_set_err(&xas, -EEXIST);
823 goto unlock;
824 }
825 if (expected && xas_find_conflict(&xas)) {
826 xas_set_err(&xas, -EEXIST);
827 goto unlock;
828 }
829 xas_store(&xas, folio);
830 if (xas_error(&xas))
831 goto unlock;
832 shmem_update_stats(folio, nr);
833 mapping->nrpages += nr;
834unlock:
835 xas_unlock_irq(&xas);
836 } while (xas_nomem(&xas, gfp));
837
838 if (xas_error(&xas)) {
839 folio->mapping = NULL;
840 folio_ref_sub(folio, nr);
841 return xas_error(&xas);
842 }
843
844 return 0;
845}
846
847/*
848 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
849 */
850static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
851{
852 struct address_space *mapping = folio->mapping;
853 long nr = folio_nr_pages(folio);
854 int error;
855
856 xa_lock_irq(&mapping->i_pages);
857 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
858 folio->mapping = NULL;
859 mapping->nrpages -= nr;
860 shmem_update_stats(folio, -nr);
861 xa_unlock_irq(&mapping->i_pages);
862 folio_put_refs(folio, nr);
863 BUG_ON(error);
864}
865
866/*
867 * Remove swap entry from page cache, free the swap and its page cache. Returns
868 * the number of pages being freed. 0 means entry not found in XArray (0 pages
869 * being freed).
870 */
871static long shmem_free_swap(struct address_space *mapping,
872 pgoff_t index, void *radswap)
873{
874 int order = xa_get_order(&mapping->i_pages, index);
875 void *old;
876
877 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
878 if (old != radswap)
879 return 0;
880 free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
881
882 return 1 << order;
883}
884
885/*
886 * Determine (in bytes) how many of the shmem object's pages mapped by the
887 * given offsets are swapped out.
888 *
889 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
890 * as long as the inode doesn't go away and racy results are not a problem.
891 */
892unsigned long shmem_partial_swap_usage(struct address_space *mapping,
893 pgoff_t start, pgoff_t end)
894{
895 XA_STATE(xas, &mapping->i_pages, start);
896 struct page *page;
897 unsigned long swapped = 0;
898 unsigned long max = end - 1;
899
900 rcu_read_lock();
901 xas_for_each(&xas, page, max) {
902 if (xas_retry(&xas, page))
903 continue;
904 if (xa_is_value(page))
905 swapped += 1 << xas_get_order(&xas);
906 if (xas.xa_index == max)
907 break;
908 if (need_resched()) {
909 xas_pause(&xas);
910 cond_resched_rcu();
911 }
912 }
913 rcu_read_unlock();
914
915 return swapped << PAGE_SHIFT;
916}
917
918/*
919 * Determine (in bytes) how many of the shmem object's pages mapped by the
920 * given vma is swapped out.
921 *
922 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
923 * as long as the inode doesn't go away and racy results are not a problem.
924 */
925unsigned long shmem_swap_usage(struct vm_area_struct *vma)
926{
927 struct inode *inode = file_inode(vma->vm_file);
928 struct shmem_inode_info *info = SHMEM_I(inode);
929 struct address_space *mapping = inode->i_mapping;
930 unsigned long swapped;
931
932 /* Be careful as we don't hold info->lock */
933 swapped = READ_ONCE(info->swapped);
934
935 /*
936 * The easier cases are when the shmem object has nothing in swap, or
937 * the vma maps it whole. Then we can simply use the stats that we
938 * already track.
939 */
940 if (!swapped)
941 return 0;
942
943 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
944 return swapped << PAGE_SHIFT;
945
946 /* Here comes the more involved part */
947 return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
948 vma->vm_pgoff + vma_pages(vma));
949}
950
951/*
952 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
953 */
954void shmem_unlock_mapping(struct address_space *mapping)
955{
956 struct folio_batch fbatch;
957 pgoff_t index = 0;
958
959 folio_batch_init(&fbatch);
960 /*
961 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
962 */
963 while (!mapping_unevictable(mapping) &&
964 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
965 check_move_unevictable_folios(&fbatch);
966 folio_batch_release(&fbatch);
967 cond_resched();
968 }
969}
970
971static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
972{
973 struct folio *folio;
974
975 /*
976 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
977 * beyond i_size, and reports fallocated folios as holes.
978 */
979 folio = filemap_get_entry(inode->i_mapping, index);
980 if (!folio)
981 return folio;
982 if (!xa_is_value(folio)) {
983 folio_lock(folio);
984 if (folio->mapping == inode->i_mapping)
985 return folio;
986 /* The folio has been swapped out */
987 folio_unlock(folio);
988 folio_put(folio);
989 }
990 /*
991 * But read a folio back from swap if any of it is within i_size
992 * (although in some cases this is just a waste of time).
993 */
994 folio = NULL;
995 shmem_get_folio(inode, index, 0, &folio, SGP_READ);
996 return folio;
997}
998
999/*
1000 * Remove range of pages and swap entries from page cache, and free them.
1001 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
1002 */
1003static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
1004 bool unfalloc)
1005{
1006 struct address_space *mapping = inode->i_mapping;
1007 struct shmem_inode_info *info = SHMEM_I(inode);
1008 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
1009 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
1010 struct folio_batch fbatch;
1011 pgoff_t indices[PAGEVEC_SIZE];
1012 struct folio *folio;
1013 bool same_folio;
1014 long nr_swaps_freed = 0;
1015 pgoff_t index;
1016 int i;
1017
1018 if (lend == -1)
1019 end = -1; /* unsigned, so actually very big */
1020
1021 if (info->fallocend > start && info->fallocend <= end && !unfalloc)
1022 info->fallocend = start;
1023
1024 folio_batch_init(&fbatch);
1025 index = start;
1026 while (index < end && find_lock_entries(mapping, &index, end - 1,
1027 &fbatch, indices)) {
1028 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1029 folio = fbatch.folios[i];
1030
1031 if (xa_is_value(folio)) {
1032 if (unfalloc)
1033 continue;
1034 nr_swaps_freed += shmem_free_swap(mapping,
1035 indices[i], folio);
1036 continue;
1037 }
1038
1039 if (!unfalloc || !folio_test_uptodate(folio))
1040 truncate_inode_folio(mapping, folio);
1041 folio_unlock(folio);
1042 }
1043 folio_batch_remove_exceptionals(&fbatch);
1044 folio_batch_release(&fbatch);
1045 cond_resched();
1046 }
1047
1048 /*
1049 * When undoing a failed fallocate, we want none of the partial folio
1050 * zeroing and splitting below, but shall want to truncate the whole
1051 * folio when !uptodate indicates that it was added by this fallocate,
1052 * even when [lstart, lend] covers only a part of the folio.
1053 */
1054 if (unfalloc)
1055 goto whole_folios;
1056
1057 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1058 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1059 if (folio) {
1060 same_folio = lend < folio_pos(folio) + folio_size(folio);
1061 folio_mark_dirty(folio);
1062 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1063 start = folio_next_index(folio);
1064 if (same_folio)
1065 end = folio->index;
1066 }
1067 folio_unlock(folio);
1068 folio_put(folio);
1069 folio = NULL;
1070 }
1071
1072 if (!same_folio)
1073 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1074 if (folio) {
1075 folio_mark_dirty(folio);
1076 if (!truncate_inode_partial_folio(folio, lstart, lend))
1077 end = folio->index;
1078 folio_unlock(folio);
1079 folio_put(folio);
1080 }
1081
1082whole_folios:
1083
1084 index = start;
1085 while (index < end) {
1086 cond_resched();
1087
1088 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1089 indices)) {
1090 /* If all gone or hole-punch or unfalloc, we're done */
1091 if (index == start || end != -1)
1092 break;
1093 /* But if truncating, restart to make sure all gone */
1094 index = start;
1095 continue;
1096 }
1097 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1098 folio = fbatch.folios[i];
1099
1100 if (xa_is_value(folio)) {
1101 long swaps_freed;
1102
1103 if (unfalloc)
1104 continue;
1105 swaps_freed = shmem_free_swap(mapping, indices[i], folio);
1106 if (!swaps_freed) {
1107 /* Swap was replaced by page: retry */
1108 index = indices[i];
1109 break;
1110 }
1111 nr_swaps_freed += swaps_freed;
1112 continue;
1113 }
1114
1115 folio_lock(folio);
1116
1117 if (!unfalloc || !folio_test_uptodate(folio)) {
1118 if (folio_mapping(folio) != mapping) {
1119 /* Page was replaced by swap: retry */
1120 folio_unlock(folio);
1121 index = indices[i];
1122 break;
1123 }
1124 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1125 folio);
1126
1127 if (!folio_test_large(folio)) {
1128 truncate_inode_folio(mapping, folio);
1129 } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1130 /*
1131 * If we split a page, reset the loop so
1132 * that we pick up the new sub pages.
1133 * Otherwise the THP was entirely
1134 * dropped or the target range was
1135 * zeroed, so just continue the loop as
1136 * is.
1137 */
1138 if (!folio_test_large(folio)) {
1139 folio_unlock(folio);
1140 index = start;
1141 break;
1142 }
1143 }
1144 }
1145 folio_unlock(folio);
1146 }
1147 folio_batch_remove_exceptionals(&fbatch);
1148 folio_batch_release(&fbatch);
1149 }
1150
1151 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1152}
1153
1154void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1155{
1156 shmem_undo_range(inode, lstart, lend, false);
1157 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1158 inode_inc_iversion(inode);
1159}
1160EXPORT_SYMBOL_GPL(shmem_truncate_range);
1161
1162static int shmem_getattr(struct mnt_idmap *idmap,
1163 const struct path *path, struct kstat *stat,
1164 u32 request_mask, unsigned int query_flags)
1165{
1166 struct inode *inode = path->dentry->d_inode;
1167 struct shmem_inode_info *info = SHMEM_I(inode);
1168
1169 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1170 shmem_recalc_inode(inode, 0, 0);
1171
1172 if (info->fsflags & FS_APPEND_FL)
1173 stat->attributes |= STATX_ATTR_APPEND;
1174 if (info->fsflags & FS_IMMUTABLE_FL)
1175 stat->attributes |= STATX_ATTR_IMMUTABLE;
1176 if (info->fsflags & FS_NODUMP_FL)
1177 stat->attributes |= STATX_ATTR_NODUMP;
1178 stat->attributes_mask |= (STATX_ATTR_APPEND |
1179 STATX_ATTR_IMMUTABLE |
1180 STATX_ATTR_NODUMP);
1181 generic_fillattr(idmap, request_mask, inode, stat);
1182
1183 if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
1184 stat->blksize = HPAGE_PMD_SIZE;
1185
1186 if (request_mask & STATX_BTIME) {
1187 stat->result_mask |= STATX_BTIME;
1188 stat->btime.tv_sec = info->i_crtime.tv_sec;
1189 stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1190 }
1191
1192 return 0;
1193}
1194
1195static int shmem_setattr(struct mnt_idmap *idmap,
1196 struct dentry *dentry, struct iattr *attr)
1197{
1198 struct inode *inode = d_inode(dentry);
1199 struct shmem_inode_info *info = SHMEM_I(inode);
1200 int error;
1201 bool update_mtime = false;
1202 bool update_ctime = true;
1203
1204 error = setattr_prepare(idmap, dentry, attr);
1205 if (error)
1206 return error;
1207
1208 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1209 if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1210 return -EPERM;
1211 }
1212 }
1213
1214 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1215 loff_t oldsize = inode->i_size;
1216 loff_t newsize = attr->ia_size;
1217
1218 /* protected by i_rwsem */
1219 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1220 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1221 return -EPERM;
1222
1223 if (newsize != oldsize) {
1224 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1225 oldsize, newsize);
1226 if (error)
1227 return error;
1228 i_size_write(inode, newsize);
1229 update_mtime = true;
1230 } else {
1231 update_ctime = false;
1232 }
1233 if (newsize <= oldsize) {
1234 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1235 if (oldsize > holebegin)
1236 unmap_mapping_range(inode->i_mapping,
1237 holebegin, 0, 1);
1238 if (info->alloced)
1239 shmem_truncate_range(inode,
1240 newsize, (loff_t)-1);
1241 /* unmap again to remove racily COWed private pages */
1242 if (oldsize > holebegin)
1243 unmap_mapping_range(inode->i_mapping,
1244 holebegin, 0, 1);
1245 }
1246 }
1247
1248 if (is_quota_modification(idmap, inode, attr)) {
1249 error = dquot_initialize(inode);
1250 if (error)
1251 return error;
1252 }
1253
1254 /* Transfer quota accounting */
1255 if (i_uid_needs_update(idmap, attr, inode) ||
1256 i_gid_needs_update(idmap, attr, inode)) {
1257 error = dquot_transfer(idmap, inode, attr);
1258 if (error)
1259 return error;
1260 }
1261
1262 setattr_copy(idmap, inode, attr);
1263 if (attr->ia_valid & ATTR_MODE)
1264 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1265 if (!error && update_ctime) {
1266 inode_set_ctime_current(inode);
1267 if (update_mtime)
1268 inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1269 inode_inc_iversion(inode);
1270 }
1271 return error;
1272}
1273
1274static void shmem_evict_inode(struct inode *inode)
1275{
1276 struct shmem_inode_info *info = SHMEM_I(inode);
1277 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1278 size_t freed = 0;
1279
1280 if (shmem_mapping(inode->i_mapping)) {
1281 shmem_unacct_size(info->flags, inode->i_size);
1282 inode->i_size = 0;
1283 mapping_set_exiting(inode->i_mapping);
1284 shmem_truncate_range(inode, 0, (loff_t)-1);
1285 if (!list_empty(&info->shrinklist)) {
1286 spin_lock(&sbinfo->shrinklist_lock);
1287 if (!list_empty(&info->shrinklist)) {
1288 list_del_init(&info->shrinklist);
1289 sbinfo->shrinklist_len--;
1290 }
1291 spin_unlock(&sbinfo->shrinklist_lock);
1292 }
1293 while (!list_empty(&info->swaplist)) {
1294 /* Wait while shmem_unuse() is scanning this inode... */
1295 wait_var_event(&info->stop_eviction,
1296 !atomic_read(&info->stop_eviction));
1297 mutex_lock(&shmem_swaplist_mutex);
1298 /* ...but beware of the race if we peeked too early */
1299 if (!atomic_read(&info->stop_eviction))
1300 list_del_init(&info->swaplist);
1301 mutex_unlock(&shmem_swaplist_mutex);
1302 }
1303 }
1304
1305 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1306 shmem_free_inode(inode->i_sb, freed);
1307 WARN_ON(inode->i_blocks);
1308 clear_inode(inode);
1309#ifdef CONFIG_TMPFS_QUOTA
1310 dquot_free_inode(inode);
1311 dquot_drop(inode);
1312#endif
1313}
1314
1315static int shmem_find_swap_entries(struct address_space *mapping,
1316 pgoff_t start, struct folio_batch *fbatch,
1317 pgoff_t *indices, unsigned int type)
1318{
1319 XA_STATE(xas, &mapping->i_pages, start);
1320 struct folio *folio;
1321 swp_entry_t entry;
1322
1323 rcu_read_lock();
1324 xas_for_each(&xas, folio, ULONG_MAX) {
1325 if (xas_retry(&xas, folio))
1326 continue;
1327
1328 if (!xa_is_value(folio))
1329 continue;
1330
1331 entry = radix_to_swp_entry(folio);
1332 /*
1333 * swapin error entries can be found in the mapping. But they're
1334 * deliberately ignored here as we've done everything we can do.
1335 */
1336 if (swp_type(entry) != type)
1337 continue;
1338
1339 indices[folio_batch_count(fbatch)] = xas.xa_index;
1340 if (!folio_batch_add(fbatch, folio))
1341 break;
1342
1343 if (need_resched()) {
1344 xas_pause(&xas);
1345 cond_resched_rcu();
1346 }
1347 }
1348 rcu_read_unlock();
1349
1350 return xas.xa_index;
1351}
1352
1353/*
1354 * Move the swapped pages for an inode to page cache. Returns the count
1355 * of pages swapped in, or the error in case of failure.
1356 */
1357static int shmem_unuse_swap_entries(struct inode *inode,
1358 struct folio_batch *fbatch, pgoff_t *indices)
1359{
1360 int i = 0;
1361 int ret = 0;
1362 int error = 0;
1363 struct address_space *mapping = inode->i_mapping;
1364
1365 for (i = 0; i < folio_batch_count(fbatch); i++) {
1366 struct folio *folio = fbatch->folios[i];
1367
1368 if (!xa_is_value(folio))
1369 continue;
1370 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1371 mapping_gfp_mask(mapping), NULL, NULL);
1372 if (error == 0) {
1373 folio_unlock(folio);
1374 folio_put(folio);
1375 ret++;
1376 }
1377 if (error == -ENOMEM)
1378 break;
1379 error = 0;
1380 }
1381 return error ? error : ret;
1382}
1383
1384/*
1385 * If swap found in inode, free it and move page from swapcache to filecache.
1386 */
1387static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1388{
1389 struct address_space *mapping = inode->i_mapping;
1390 pgoff_t start = 0;
1391 struct folio_batch fbatch;
1392 pgoff_t indices[PAGEVEC_SIZE];
1393 int ret = 0;
1394
1395 do {
1396 folio_batch_init(&fbatch);
1397 shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1398 if (folio_batch_count(&fbatch) == 0) {
1399 ret = 0;
1400 break;
1401 }
1402
1403 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1404 if (ret < 0)
1405 break;
1406
1407 start = indices[folio_batch_count(&fbatch) - 1];
1408 } while (true);
1409
1410 return ret;
1411}
1412
1413/*
1414 * Read all the shared memory data that resides in the swap
1415 * device 'type' back into memory, so the swap device can be
1416 * unused.
1417 */
1418int shmem_unuse(unsigned int type)
1419{
1420 struct shmem_inode_info *info, *next;
1421 int error = 0;
1422
1423 if (list_empty(&shmem_swaplist))
1424 return 0;
1425
1426 mutex_lock(&shmem_swaplist_mutex);
1427 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1428 if (!info->swapped) {
1429 list_del_init(&info->swaplist);
1430 continue;
1431 }
1432 /*
1433 * Drop the swaplist mutex while searching the inode for swap;
1434 * but before doing so, make sure shmem_evict_inode() will not
1435 * remove placeholder inode from swaplist, nor let it be freed
1436 * (igrab() would protect from unlink, but not from unmount).
1437 */
1438 atomic_inc(&info->stop_eviction);
1439 mutex_unlock(&shmem_swaplist_mutex);
1440
1441 error = shmem_unuse_inode(&info->vfs_inode, type);
1442 cond_resched();
1443
1444 mutex_lock(&shmem_swaplist_mutex);
1445 next = list_next_entry(info, swaplist);
1446 if (!info->swapped)
1447 list_del_init(&info->swaplist);
1448 if (atomic_dec_and_test(&info->stop_eviction))
1449 wake_up_var(&info->stop_eviction);
1450 if (error)
1451 break;
1452 }
1453 mutex_unlock(&shmem_swaplist_mutex);
1454
1455 return error;
1456}
1457
1458/*
1459 * Move the page from the page cache to the swap cache.
1460 */
1461static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1462{
1463 struct folio *folio = page_folio(page);
1464 struct address_space *mapping = folio->mapping;
1465 struct inode *inode = mapping->host;
1466 struct shmem_inode_info *info = SHMEM_I(inode);
1467 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1468 swp_entry_t swap;
1469 pgoff_t index;
1470 int nr_pages;
1471 bool split = false;
1472
1473 /*
1474 * Our capabilities prevent regular writeback or sync from ever calling
1475 * shmem_writepage; but a stacking filesystem might use ->writepage of
1476 * its underlying filesystem, in which case tmpfs should write out to
1477 * swap only in response to memory pressure, and not for the writeback
1478 * threads or sync.
1479 */
1480 if (WARN_ON_ONCE(!wbc->for_reclaim))
1481 goto redirty;
1482
1483 if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
1484 goto redirty;
1485
1486 if (!total_swap_pages)
1487 goto redirty;
1488
1489 /*
1490 * If CONFIG_THP_SWAP is not enabled, the large folio should be
1491 * split when swapping.
1492 *
1493 * And shrinkage of pages beyond i_size does not split swap, so
1494 * swapout of a large folio crossing i_size needs to split too
1495 * (unless fallocate has been used to preallocate beyond EOF).
1496 */
1497 if (folio_test_large(folio)) {
1498 index = shmem_fallocend(inode,
1499 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
1500 if ((index > folio->index && index < folio_next_index(folio)) ||
1501 !IS_ENABLED(CONFIG_THP_SWAP))
1502 split = true;
1503 }
1504
1505 if (split) {
1506try_split:
1507 /* Ensure the subpages are still dirty */
1508 folio_test_set_dirty(folio);
1509 if (split_huge_page_to_list_to_order(page, wbc->list, 0))
1510 goto redirty;
1511 folio = page_folio(page);
1512 folio_clear_dirty(folio);
1513 }
1514
1515 index = folio->index;
1516 nr_pages = folio_nr_pages(folio);
1517
1518 /*
1519 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1520 * value into swapfile.c, the only way we can correctly account for a
1521 * fallocated folio arriving here is now to initialize it and write it.
1522 *
1523 * That's okay for a folio already fallocated earlier, but if we have
1524 * not yet completed the fallocation, then (a) we want to keep track
1525 * of this folio in case we have to undo it, and (b) it may not be a
1526 * good idea to continue anyway, once we're pushing into swap. So
1527 * reactivate the folio, and let shmem_fallocate() quit when too many.
1528 */
1529 if (!folio_test_uptodate(folio)) {
1530 if (inode->i_private) {
1531 struct shmem_falloc *shmem_falloc;
1532 spin_lock(&inode->i_lock);
1533 shmem_falloc = inode->i_private;
1534 if (shmem_falloc &&
1535 !shmem_falloc->waitq &&
1536 index >= shmem_falloc->start &&
1537 index < shmem_falloc->next)
1538 shmem_falloc->nr_unswapped += nr_pages;
1539 else
1540 shmem_falloc = NULL;
1541 spin_unlock(&inode->i_lock);
1542 if (shmem_falloc)
1543 goto redirty;
1544 }
1545 folio_zero_range(folio, 0, folio_size(folio));
1546 flush_dcache_folio(folio);
1547 folio_mark_uptodate(folio);
1548 }
1549
1550 swap = folio_alloc_swap(folio);
1551 if (!swap.val) {
1552 if (nr_pages > 1)
1553 goto try_split;
1554
1555 goto redirty;
1556 }
1557
1558 /*
1559 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1560 * if it's not already there. Do it now before the folio is
1561 * moved to swap cache, when its pagelock no longer protects
1562 * the inode from eviction. But don't unlock the mutex until
1563 * we've incremented swapped, because shmem_unuse_inode() will
1564 * prune a !swapped inode from the swaplist under this mutex.
1565 */
1566 mutex_lock(&shmem_swaplist_mutex);
1567 if (list_empty(&info->swaplist))
1568 list_add(&info->swaplist, &shmem_swaplist);
1569
1570 if (add_to_swap_cache(folio, swap,
1571 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1572 NULL) == 0) {
1573 shmem_recalc_inode(inode, 0, nr_pages);
1574 swap_shmem_alloc(swap, nr_pages);
1575 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1576
1577 mutex_unlock(&shmem_swaplist_mutex);
1578 BUG_ON(folio_mapped(folio));
1579 return swap_writepage(&folio->page, wbc);
1580 }
1581
1582 mutex_unlock(&shmem_swaplist_mutex);
1583 put_swap_folio(folio, swap);
1584redirty:
1585 folio_mark_dirty(folio);
1586 if (wbc->for_reclaim)
1587 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1588 folio_unlock(folio);
1589 return 0;
1590}
1591
1592#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1593static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1594{
1595 char buffer[64];
1596
1597 if (!mpol || mpol->mode == MPOL_DEFAULT)
1598 return; /* show nothing */
1599
1600 mpol_to_str(buffer, sizeof(buffer), mpol);
1601
1602 seq_printf(seq, ",mpol=%s", buffer);
1603}
1604
1605static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1606{
1607 struct mempolicy *mpol = NULL;
1608 if (sbinfo->mpol) {
1609 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1610 mpol = sbinfo->mpol;
1611 mpol_get(mpol);
1612 raw_spin_unlock(&sbinfo->stat_lock);
1613 }
1614 return mpol;
1615}
1616#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1617static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1618{
1619}
1620static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1621{
1622 return NULL;
1623}
1624#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1625
1626static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1627 pgoff_t index, unsigned int order, pgoff_t *ilx);
1628
1629static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1630 struct shmem_inode_info *info, pgoff_t index)
1631{
1632 struct mempolicy *mpol;
1633 pgoff_t ilx;
1634 struct folio *folio;
1635
1636 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1637 folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1638 mpol_cond_put(mpol);
1639
1640 return folio;
1641}
1642
1643/*
1644 * Make sure huge_gfp is always more limited than limit_gfp.
1645 * Some of the flags set permissions, while others set limitations.
1646 */
1647static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1648{
1649 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1650 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1651 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1652 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1653
1654 /* Allow allocations only from the originally specified zones. */
1655 result |= zoneflags;
1656
1657 /*
1658 * Minimize the result gfp by taking the union with the deny flags,
1659 * and the intersection of the allow flags.
1660 */
1661 result |= (limit_gfp & denyflags);
1662 result |= (huge_gfp & limit_gfp) & allowflags;
1663
1664 return result;
1665}
1666
1667#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1668bool shmem_hpage_pmd_enabled(void)
1669{
1670 if (shmem_huge == SHMEM_HUGE_DENY)
1671 return false;
1672 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
1673 return true;
1674 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
1675 return true;
1676 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
1677 return true;
1678 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
1679 shmem_huge != SHMEM_HUGE_NEVER)
1680 return true;
1681
1682 return false;
1683}
1684
1685unsigned long shmem_allowable_huge_orders(struct inode *inode,
1686 struct vm_area_struct *vma, pgoff_t index,
1687 loff_t write_end, bool shmem_huge_force)
1688{
1689 unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1690 unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1691 unsigned long vm_flags = vma ? vma->vm_flags : 0;
1692 pgoff_t aligned_index;
1693 bool global_huge;
1694 loff_t i_size;
1695 int order;
1696
1697 if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
1698 return 0;
1699
1700 global_huge = shmem_huge_global_enabled(inode, index, write_end,
1701 shmem_huge_force, vm_flags);
1702 if (!vma || !vma_is_anon_shmem(vma)) {
1703 /*
1704 * For tmpfs, we now only support PMD sized THP if huge page
1705 * is enabled, otherwise fallback to order 0.
1706 */
1707 return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
1708 }
1709
1710 /*
1711 * Following the 'deny' semantics of the top level, force the huge
1712 * option off from all mounts.
1713 */
1714 if (shmem_huge == SHMEM_HUGE_DENY)
1715 return 0;
1716
1717 /*
1718 * Only allow inherit orders if the top-level value is 'force', which
1719 * means non-PMD sized THP can not override 'huge' mount option now.
1720 */
1721 if (shmem_huge == SHMEM_HUGE_FORCE)
1722 return READ_ONCE(huge_shmem_orders_inherit);
1723
1724 /* Allow mTHP that will be fully within i_size. */
1725 order = highest_order(within_size_orders);
1726 while (within_size_orders) {
1727 aligned_index = round_up(index + 1, 1 << order);
1728 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1729 if (i_size >> PAGE_SHIFT >= aligned_index) {
1730 mask |= within_size_orders;
1731 break;
1732 }
1733
1734 order = next_order(&within_size_orders, order);
1735 }
1736
1737 if (vm_flags & VM_HUGEPAGE)
1738 mask |= READ_ONCE(huge_shmem_orders_madvise);
1739
1740 if (global_huge)
1741 mask |= READ_ONCE(huge_shmem_orders_inherit);
1742
1743 return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1744}
1745
1746static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1747 struct address_space *mapping, pgoff_t index,
1748 unsigned long orders)
1749{
1750 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1751 pgoff_t aligned_index;
1752 unsigned long pages;
1753 int order;
1754
1755 if (vma) {
1756 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1757 if (!orders)
1758 return 0;
1759 }
1760
1761 /* Find the highest order that can add into the page cache */
1762 order = highest_order(orders);
1763 while (orders) {
1764 pages = 1UL << order;
1765 aligned_index = round_down(index, pages);
1766 /*
1767 * Check for conflict before waiting on a huge allocation.
1768 * Conflict might be that a huge page has just been allocated
1769 * and added to page cache by a racing thread, or that there
1770 * is already at least one small page in the huge extent.
1771 * Be careful to retry when appropriate, but not forever!
1772 * Elsewhere -EEXIST would be the right code, but not here.
1773 */
1774 if (!xa_find(&mapping->i_pages, &aligned_index,
1775 aligned_index + pages - 1, XA_PRESENT))
1776 break;
1777 order = next_order(&orders, order);
1778 }
1779
1780 return orders;
1781}
1782#else
1783static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1784 struct address_space *mapping, pgoff_t index,
1785 unsigned long orders)
1786{
1787 return 0;
1788}
1789#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1790
1791static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1792 struct shmem_inode_info *info, pgoff_t index)
1793{
1794 struct mempolicy *mpol;
1795 pgoff_t ilx;
1796 struct folio *folio;
1797
1798 mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1799 folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1800 mpol_cond_put(mpol);
1801
1802 return folio;
1803}
1804
1805static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1806 gfp_t gfp, struct inode *inode, pgoff_t index,
1807 struct mm_struct *fault_mm, unsigned long orders)
1808{
1809 struct address_space *mapping = inode->i_mapping;
1810 struct shmem_inode_info *info = SHMEM_I(inode);
1811 unsigned long suitable_orders = 0;
1812 struct folio *folio = NULL;
1813 long pages;
1814 int error, order;
1815
1816 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1817 orders = 0;
1818
1819 if (orders > 0) {
1820 suitable_orders = shmem_suitable_orders(inode, vmf,
1821 mapping, index, orders);
1822
1823 order = highest_order(suitable_orders);
1824 while (suitable_orders) {
1825 pages = 1UL << order;
1826 index = round_down(index, pages);
1827 folio = shmem_alloc_folio(gfp, order, info, index);
1828 if (folio)
1829 goto allocated;
1830
1831 if (pages == HPAGE_PMD_NR)
1832 count_vm_event(THP_FILE_FALLBACK);
1833 count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1834 order = next_order(&suitable_orders, order);
1835 }
1836 } else {
1837 pages = 1;
1838 folio = shmem_alloc_folio(gfp, 0, info, index);
1839 }
1840 if (!folio)
1841 return ERR_PTR(-ENOMEM);
1842
1843allocated:
1844 __folio_set_locked(folio);
1845 __folio_set_swapbacked(folio);
1846
1847 gfp &= GFP_RECLAIM_MASK;
1848 error = mem_cgroup_charge(folio, fault_mm, gfp);
1849 if (error) {
1850 if (xa_find(&mapping->i_pages, &index,
1851 index + pages - 1, XA_PRESENT)) {
1852 error = -EEXIST;
1853 } else if (pages > 1) {
1854 if (pages == HPAGE_PMD_NR) {
1855 count_vm_event(THP_FILE_FALLBACK);
1856 count_vm_event(THP_FILE_FALLBACK_CHARGE);
1857 }
1858 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1859 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1860 }
1861 goto unlock;
1862 }
1863
1864 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1865 if (error)
1866 goto unlock;
1867
1868 error = shmem_inode_acct_blocks(inode, pages);
1869 if (error) {
1870 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1871 long freed;
1872 /*
1873 * Try to reclaim some space by splitting a few
1874 * large folios beyond i_size on the filesystem.
1875 */
1876 shmem_unused_huge_shrink(sbinfo, NULL, pages);
1877 /*
1878 * And do a shmem_recalc_inode() to account for freed pages:
1879 * except our folio is there in cache, so not quite balanced.
1880 */
1881 spin_lock(&info->lock);
1882 freed = pages + info->alloced - info->swapped -
1883 READ_ONCE(mapping->nrpages);
1884 if (freed > 0)
1885 info->alloced -= freed;
1886 spin_unlock(&info->lock);
1887 if (freed > 0)
1888 shmem_inode_unacct_blocks(inode, freed);
1889 error = shmem_inode_acct_blocks(inode, pages);
1890 if (error) {
1891 filemap_remove_folio(folio);
1892 goto unlock;
1893 }
1894 }
1895
1896 shmem_recalc_inode(inode, pages, 0);
1897 folio_add_lru(folio);
1898 return folio;
1899
1900unlock:
1901 folio_unlock(folio);
1902 folio_put(folio);
1903 return ERR_PTR(error);
1904}
1905
1906/*
1907 * When a page is moved from swapcache to shmem filecache (either by the
1908 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1909 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1910 * ignorance of the mapping it belongs to. If that mapping has special
1911 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1912 * we may need to copy to a suitable page before moving to filecache.
1913 *
1914 * In a future release, this may well be extended to respect cpuset and
1915 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1916 * but for now it is a simple matter of zone.
1917 */
1918static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1919{
1920 return folio_zonenum(folio) > gfp_zone(gfp);
1921}
1922
1923static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1924 struct shmem_inode_info *info, pgoff_t index,
1925 struct vm_area_struct *vma)
1926{
1927 struct folio *new, *old = *foliop;
1928 swp_entry_t entry = old->swap;
1929 struct address_space *swap_mapping = swap_address_space(entry);
1930 pgoff_t swap_index = swap_cache_index(entry);
1931 XA_STATE(xas, &swap_mapping->i_pages, swap_index);
1932 int nr_pages = folio_nr_pages(old);
1933 int error = 0, i;
1934
1935 /*
1936 * We have arrived here because our zones are constrained, so don't
1937 * limit chance of success by further cpuset and node constraints.
1938 */
1939 gfp &= ~GFP_CONSTRAINT_MASK;
1940#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1941 if (nr_pages > 1) {
1942 gfp_t huge_gfp = vma_thp_gfp_mask(vma);
1943
1944 gfp = limit_gfp_mask(huge_gfp, gfp);
1945 }
1946#endif
1947
1948 new = shmem_alloc_folio(gfp, folio_order(old), info, index);
1949 if (!new)
1950 return -ENOMEM;
1951
1952 folio_ref_add(new, nr_pages);
1953 folio_copy(new, old);
1954 flush_dcache_folio(new);
1955
1956 __folio_set_locked(new);
1957 __folio_set_swapbacked(new);
1958 folio_mark_uptodate(new);
1959 new->swap = entry;
1960 folio_set_swapcache(new);
1961
1962 /* Swap cache still stores N entries instead of a high-order entry */
1963 xa_lock_irq(&swap_mapping->i_pages);
1964 for (i = 0; i < nr_pages; i++) {
1965 void *item = xas_load(&xas);
1966
1967 if (item != old) {
1968 error = -ENOENT;
1969 break;
1970 }
1971
1972 xas_store(&xas, new);
1973 xas_next(&xas);
1974 }
1975 if (!error) {
1976 mem_cgroup_replace_folio(old, new);
1977 shmem_update_stats(new, nr_pages);
1978 shmem_update_stats(old, -nr_pages);
1979 }
1980 xa_unlock_irq(&swap_mapping->i_pages);
1981
1982 if (unlikely(error)) {
1983 /*
1984 * Is this possible? I think not, now that our callers
1985 * check both the swapcache flag and folio->private
1986 * after getting the folio lock; but be defensive.
1987 * Reverse old to newpage for clear and free.
1988 */
1989 old = new;
1990 } else {
1991 folio_add_lru(new);
1992 *foliop = new;
1993 }
1994
1995 folio_clear_swapcache(old);
1996 old->private = NULL;
1997
1998 folio_unlock(old);
1999 /*
2000 * The old folio are removed from swap cache, drop the 'nr_pages'
2001 * reference, as well as one temporary reference getting from swap
2002 * cache.
2003 */
2004 folio_put_refs(old, nr_pages + 1);
2005 return error;
2006}
2007
2008static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
2009 struct folio *folio, swp_entry_t swap)
2010{
2011 struct address_space *mapping = inode->i_mapping;
2012 swp_entry_t swapin_error;
2013 void *old;
2014 int nr_pages;
2015
2016 swapin_error = make_poisoned_swp_entry();
2017 old = xa_cmpxchg_irq(&mapping->i_pages, index,
2018 swp_to_radix_entry(swap),
2019 swp_to_radix_entry(swapin_error), 0);
2020 if (old != swp_to_radix_entry(swap))
2021 return;
2022
2023 nr_pages = folio_nr_pages(folio);
2024 folio_wait_writeback(folio);
2025 delete_from_swap_cache(folio);
2026 /*
2027 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
2028 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
2029 * in shmem_evict_inode().
2030 */
2031 shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2032 swap_free_nr(swap, nr_pages);
2033}
2034
2035static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2036 swp_entry_t swap, gfp_t gfp)
2037{
2038 struct address_space *mapping = inode->i_mapping;
2039 XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2040 void *alloced_shadow = NULL;
2041 int alloced_order = 0, i;
2042
2043 /* Convert user data gfp flags to xarray node gfp flags */
2044 gfp &= GFP_RECLAIM_MASK;
2045
2046 for (;;) {
2047 int order = -1, split_order = 0;
2048 void *old = NULL;
2049
2050 xas_lock_irq(&xas);
2051 old = xas_load(&xas);
2052 if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2053 xas_set_err(&xas, -EEXIST);
2054 goto unlock;
2055 }
2056
2057 order = xas_get_order(&xas);
2058
2059 /* Swap entry may have changed before we re-acquire the lock */
2060 if (alloced_order &&
2061 (old != alloced_shadow || order != alloced_order)) {
2062 xas_destroy(&xas);
2063 alloced_order = 0;
2064 }
2065
2066 /* Try to split large swap entry in pagecache */
2067 if (order > 0) {
2068 if (!alloced_order) {
2069 split_order = order;
2070 goto unlock;
2071 }
2072 xas_split(&xas, old, order);
2073
2074 /*
2075 * Re-set the swap entry after splitting, and the swap
2076 * offset of the original large entry must be continuous.
2077 */
2078 for (i = 0; i < 1 << order; i++) {
2079 pgoff_t aligned_index = round_down(index, 1 << order);
2080 swp_entry_t tmp;
2081
2082 tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
2083 __xa_store(&mapping->i_pages, aligned_index + i,
2084 swp_to_radix_entry(tmp), 0);
2085 }
2086 }
2087
2088unlock:
2089 xas_unlock_irq(&xas);
2090
2091 /* split needed, alloc here and retry. */
2092 if (split_order) {
2093 xas_split_alloc(&xas, old, split_order, gfp);
2094 if (xas_error(&xas))
2095 goto error;
2096 alloced_shadow = old;
2097 alloced_order = split_order;
2098 xas_reset(&xas);
2099 continue;
2100 }
2101
2102 if (!xas_nomem(&xas, gfp))
2103 break;
2104 }
2105
2106error:
2107 if (xas_error(&xas))
2108 return xas_error(&xas);
2109
2110 return alloced_order;
2111}
2112
2113/*
2114 * Swap in the folio pointed to by *foliop.
2115 * Caller has to make sure that *foliop contains a valid swapped folio.
2116 * Returns 0 and the folio in foliop if success. On failure, returns the
2117 * error code and NULL in *foliop.
2118 */
2119static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2120 struct folio **foliop, enum sgp_type sgp,
2121 gfp_t gfp, struct vm_area_struct *vma,
2122 vm_fault_t *fault_type)
2123{
2124 struct address_space *mapping = inode->i_mapping;
2125 struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2126 struct shmem_inode_info *info = SHMEM_I(inode);
2127 struct swap_info_struct *si;
2128 struct folio *folio = NULL;
2129 swp_entry_t swap;
2130 int error, nr_pages;
2131
2132 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2133 swap = radix_to_swp_entry(*foliop);
2134 *foliop = NULL;
2135
2136 if (is_poisoned_swp_entry(swap))
2137 return -EIO;
2138
2139 si = get_swap_device(swap);
2140 if (!si) {
2141 if (!shmem_confirm_swap(mapping, index, swap))
2142 return -EEXIST;
2143 else
2144 return -EINVAL;
2145 }
2146
2147 /* Look it up and read it in.. */
2148 folio = swap_cache_get_folio(swap, NULL, 0);
2149 if (!folio) {
2150 int split_order;
2151
2152 /* Or update major stats only when swapin succeeds?? */
2153 if (fault_type) {
2154 *fault_type |= VM_FAULT_MAJOR;
2155 count_vm_event(PGMAJFAULT);
2156 count_memcg_event_mm(fault_mm, PGMAJFAULT);
2157 }
2158
2159 /*
2160 * Now swap device can only swap in order 0 folio, then we
2161 * should split the large swap entry stored in the pagecache
2162 * if necessary.
2163 */
2164 split_order = shmem_split_large_entry(inode, index, swap, gfp);
2165 if (split_order < 0) {
2166 error = split_order;
2167 goto failed;
2168 }
2169
2170 /*
2171 * If the large swap entry has already been split, it is
2172 * necessary to recalculate the new swap entry based on
2173 * the old order alignment.
2174 */
2175 if (split_order > 0) {
2176 pgoff_t offset = index - round_down(index, 1 << split_order);
2177
2178 swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2179 }
2180
2181 /* Here we actually start the io */
2182 folio = shmem_swapin_cluster(swap, gfp, info, index);
2183 if (!folio) {
2184 error = -ENOMEM;
2185 goto failed;
2186 }
2187 }
2188
2189 /* We have to do this with folio locked to prevent races */
2190 folio_lock(folio);
2191 if (!folio_test_swapcache(folio) ||
2192 folio->swap.val != swap.val ||
2193 !shmem_confirm_swap(mapping, index, swap)) {
2194 error = -EEXIST;
2195 goto unlock;
2196 }
2197 if (!folio_test_uptodate(folio)) {
2198 error = -EIO;
2199 goto failed;
2200 }
2201 folio_wait_writeback(folio);
2202 nr_pages = folio_nr_pages(folio);
2203
2204 /*
2205 * Some architectures may have to restore extra metadata to the
2206 * folio after reading from swap.
2207 */
2208 arch_swap_restore(folio_swap(swap, folio), folio);
2209
2210 if (shmem_should_replace_folio(folio, gfp)) {
2211 error = shmem_replace_folio(&folio, gfp, info, index, vma);
2212 if (error)
2213 goto failed;
2214 }
2215
2216 error = shmem_add_to_page_cache(folio, mapping,
2217 round_down(index, nr_pages),
2218 swp_to_radix_entry(swap), gfp);
2219 if (error)
2220 goto failed;
2221
2222 shmem_recalc_inode(inode, 0, -nr_pages);
2223
2224 if (sgp == SGP_WRITE)
2225 folio_mark_accessed(folio);
2226
2227 delete_from_swap_cache(folio);
2228 folio_mark_dirty(folio);
2229 swap_free_nr(swap, nr_pages);
2230 put_swap_device(si);
2231
2232 *foliop = folio;
2233 return 0;
2234failed:
2235 if (!shmem_confirm_swap(mapping, index, swap))
2236 error = -EEXIST;
2237 if (error == -EIO)
2238 shmem_set_folio_swapin_error(inode, index, folio, swap);
2239unlock:
2240 if (folio) {
2241 folio_unlock(folio);
2242 folio_put(folio);
2243 }
2244 put_swap_device(si);
2245
2246 return error;
2247}
2248
2249/*
2250 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2251 *
2252 * If we allocate a new one we do not mark it dirty. That's up to the
2253 * vm. If we swap it in we mark it dirty since we also free the swap
2254 * entry since a page cannot live in both the swap and page cache.
2255 *
2256 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2257 */
2258static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2259 loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2260 gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2261{
2262 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2263 struct mm_struct *fault_mm;
2264 struct folio *folio;
2265 int error;
2266 bool alloced;
2267 unsigned long orders = 0;
2268
2269 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2270 return -EINVAL;
2271
2272 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2273 return -EFBIG;
2274repeat:
2275 if (sgp <= SGP_CACHE &&
2276 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2277 return -EINVAL;
2278
2279 alloced = false;
2280 fault_mm = vma ? vma->vm_mm : NULL;
2281
2282 folio = filemap_get_entry(inode->i_mapping, index);
2283 if (folio && vma && userfaultfd_minor(vma)) {
2284 if (!xa_is_value(folio))
2285 folio_put(folio);
2286 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2287 return 0;
2288 }
2289
2290 if (xa_is_value(folio)) {
2291 error = shmem_swapin_folio(inode, index, &folio,
2292 sgp, gfp, vma, fault_type);
2293 if (error == -EEXIST)
2294 goto repeat;
2295
2296 *foliop = folio;
2297 return error;
2298 }
2299
2300 if (folio) {
2301 folio_lock(folio);
2302
2303 /* Has the folio been truncated or swapped out? */
2304 if (unlikely(folio->mapping != inode->i_mapping)) {
2305 folio_unlock(folio);
2306 folio_put(folio);
2307 goto repeat;
2308 }
2309 if (sgp == SGP_WRITE)
2310 folio_mark_accessed(folio);
2311 if (folio_test_uptodate(folio))
2312 goto out;
2313 /* fallocated folio */
2314 if (sgp != SGP_READ)
2315 goto clear;
2316 folio_unlock(folio);
2317 folio_put(folio);
2318 }
2319
2320 /*
2321 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2322 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2323 */
2324 *foliop = NULL;
2325 if (sgp == SGP_READ)
2326 return 0;
2327 if (sgp == SGP_NOALLOC)
2328 return -ENOENT;
2329
2330 /*
2331 * Fast cache lookup and swap lookup did not find it: allocate.
2332 */
2333
2334 if (vma && userfaultfd_missing(vma)) {
2335 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2336 return 0;
2337 }
2338
2339 /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2340 orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2341 if (orders > 0) {
2342 gfp_t huge_gfp;
2343
2344 huge_gfp = vma_thp_gfp_mask(vma);
2345 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2346 folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2347 inode, index, fault_mm, orders);
2348 if (!IS_ERR(folio)) {
2349 if (folio_test_pmd_mappable(folio))
2350 count_vm_event(THP_FILE_ALLOC);
2351 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2352 goto alloced;
2353 }
2354 if (PTR_ERR(folio) == -EEXIST)
2355 goto repeat;
2356 }
2357
2358 folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2359 if (IS_ERR(folio)) {
2360 error = PTR_ERR(folio);
2361 if (error == -EEXIST)
2362 goto repeat;
2363 folio = NULL;
2364 goto unlock;
2365 }
2366
2367alloced:
2368 alloced = true;
2369 if (folio_test_large(folio) &&
2370 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2371 folio_next_index(folio)) {
2372 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2373 struct shmem_inode_info *info = SHMEM_I(inode);
2374 /*
2375 * Part of the large folio is beyond i_size: subject
2376 * to shrink under memory pressure.
2377 */
2378 spin_lock(&sbinfo->shrinklist_lock);
2379 /*
2380 * _careful to defend against unlocked access to
2381 * ->shrink_list in shmem_unused_huge_shrink()
2382 */
2383 if (list_empty_careful(&info->shrinklist)) {
2384 list_add_tail(&info->shrinklist,
2385 &sbinfo->shrinklist);
2386 sbinfo->shrinklist_len++;
2387 }
2388 spin_unlock(&sbinfo->shrinklist_lock);
2389 }
2390
2391 if (sgp == SGP_WRITE)
2392 folio_set_referenced(folio);
2393 /*
2394 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2395 */
2396 if (sgp == SGP_FALLOC)
2397 sgp = SGP_WRITE;
2398clear:
2399 /*
2400 * Let SGP_WRITE caller clear ends if write does not fill folio;
2401 * but SGP_FALLOC on a folio fallocated earlier must initialize
2402 * it now, lest undo on failure cancel our earlier guarantee.
2403 */
2404 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2405 long i, n = folio_nr_pages(folio);
2406
2407 for (i = 0; i < n; i++)
2408 clear_highpage(folio_page(folio, i));
2409 flush_dcache_folio(folio);
2410 folio_mark_uptodate(folio);
2411 }
2412
2413 /* Perhaps the file has been truncated since we checked */
2414 if (sgp <= SGP_CACHE &&
2415 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2416 error = -EINVAL;
2417 goto unlock;
2418 }
2419out:
2420 *foliop = folio;
2421 return 0;
2422
2423 /*
2424 * Error recovery.
2425 */
2426unlock:
2427 if (alloced)
2428 filemap_remove_folio(folio);
2429 shmem_recalc_inode(inode, 0, 0);
2430 if (folio) {
2431 folio_unlock(folio);
2432 folio_put(folio);
2433 }
2434 return error;
2435}
2436
2437/**
2438 * shmem_get_folio - find, and lock a shmem folio.
2439 * @inode: inode to search
2440 * @index: the page index.
2441 * @write_end: end of a write, could extend inode size
2442 * @foliop: pointer to the folio if found
2443 * @sgp: SGP_* flags to control behavior
2444 *
2445 * Looks up the page cache entry at @inode & @index. If a folio is
2446 * present, it is returned locked with an increased refcount.
2447 *
2448 * If the caller modifies data in the folio, it must call folio_mark_dirty()
2449 * before unlocking the folio to ensure that the folio is not reclaimed.
2450 * There is no need to reserve space before calling folio_mark_dirty().
2451 *
2452 * When no folio is found, the behavior depends on @sgp:
2453 * - for SGP_READ, *@foliop is %NULL and 0 is returned
2454 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2455 * - for all other flags a new folio is allocated, inserted into the
2456 * page cache and returned locked in @foliop.
2457 *
2458 * Context: May sleep.
2459 * Return: 0 if successful, else a negative error code.
2460 */
2461int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2462 struct folio **foliop, enum sgp_type sgp)
2463{
2464 return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2465 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2466}
2467EXPORT_SYMBOL_GPL(shmem_get_folio);
2468
2469/*
2470 * This is like autoremove_wake_function, but it removes the wait queue
2471 * entry unconditionally - even if something else had already woken the
2472 * target.
2473 */
2474static int synchronous_wake_function(wait_queue_entry_t *wait,
2475 unsigned int mode, int sync, void *key)
2476{
2477 int ret = default_wake_function(wait, mode, sync, key);
2478 list_del_init(&wait->entry);
2479 return ret;
2480}
2481
2482/*
2483 * Trinity finds that probing a hole which tmpfs is punching can
2484 * prevent the hole-punch from ever completing: which in turn
2485 * locks writers out with its hold on i_rwsem. So refrain from
2486 * faulting pages into the hole while it's being punched. Although
2487 * shmem_undo_range() does remove the additions, it may be unable to
2488 * keep up, as each new page needs its own unmap_mapping_range() call,
2489 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2490 *
2491 * It does not matter if we sometimes reach this check just before the
2492 * hole-punch begins, so that one fault then races with the punch:
2493 * we just need to make racing faults a rare case.
2494 *
2495 * The implementation below would be much simpler if we just used a
2496 * standard mutex or completion: but we cannot take i_rwsem in fault,
2497 * and bloating every shmem inode for this unlikely case would be sad.
2498 */
2499static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2500{
2501 struct shmem_falloc *shmem_falloc;
2502 struct file *fpin = NULL;
2503 vm_fault_t ret = 0;
2504
2505 spin_lock(&inode->i_lock);
2506 shmem_falloc = inode->i_private;
2507 if (shmem_falloc &&
2508 shmem_falloc->waitq &&
2509 vmf->pgoff >= shmem_falloc->start &&
2510 vmf->pgoff < shmem_falloc->next) {
2511 wait_queue_head_t *shmem_falloc_waitq;
2512 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2513
2514 ret = VM_FAULT_NOPAGE;
2515 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2516 shmem_falloc_waitq = shmem_falloc->waitq;
2517 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2518 TASK_UNINTERRUPTIBLE);
2519 spin_unlock(&inode->i_lock);
2520 schedule();
2521
2522 /*
2523 * shmem_falloc_waitq points into the shmem_fallocate()
2524 * stack of the hole-punching task: shmem_falloc_waitq
2525 * is usually invalid by the time we reach here, but
2526 * finish_wait() does not dereference it in that case;
2527 * though i_lock needed lest racing with wake_up_all().
2528 */
2529 spin_lock(&inode->i_lock);
2530 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2531 }
2532 spin_unlock(&inode->i_lock);
2533 if (fpin) {
2534 fput(fpin);
2535 ret = VM_FAULT_RETRY;
2536 }
2537 return ret;
2538}
2539
2540static vm_fault_t shmem_fault(struct vm_fault *vmf)
2541{
2542 struct inode *inode = file_inode(vmf->vma->vm_file);
2543 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2544 struct folio *folio = NULL;
2545 vm_fault_t ret = 0;
2546 int err;
2547
2548 /*
2549 * Trinity finds that probing a hole which tmpfs is punching can
2550 * prevent the hole-punch from ever completing: noted in i_private.
2551 */
2552 if (unlikely(inode->i_private)) {
2553 ret = shmem_falloc_wait(vmf, inode);
2554 if (ret)
2555 return ret;
2556 }
2557
2558 WARN_ON_ONCE(vmf->page != NULL);
2559 err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2560 gfp, vmf, &ret);
2561 if (err)
2562 return vmf_error(err);
2563 if (folio) {
2564 vmf->page = folio_file_page(folio, vmf->pgoff);
2565 ret |= VM_FAULT_LOCKED;
2566 }
2567 return ret;
2568}
2569
2570unsigned long shmem_get_unmapped_area(struct file *file,
2571 unsigned long uaddr, unsigned long len,
2572 unsigned long pgoff, unsigned long flags)
2573{
2574 unsigned long addr;
2575 unsigned long offset;
2576 unsigned long inflated_len;
2577 unsigned long inflated_addr;
2578 unsigned long inflated_offset;
2579 unsigned long hpage_size;
2580
2581 if (len > TASK_SIZE)
2582 return -ENOMEM;
2583
2584 addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
2585 flags);
2586
2587 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2588 return addr;
2589 if (IS_ERR_VALUE(addr))
2590 return addr;
2591 if (addr & ~PAGE_MASK)
2592 return addr;
2593 if (addr > TASK_SIZE - len)
2594 return addr;
2595
2596 if (shmem_huge == SHMEM_HUGE_DENY)
2597 return addr;
2598 if (flags & MAP_FIXED)
2599 return addr;
2600 /*
2601 * Our priority is to support MAP_SHARED mapped hugely;
2602 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2603 * But if caller specified an address hint and we allocated area there
2604 * successfully, respect that as before.
2605 */
2606 if (uaddr == addr)
2607 return addr;
2608
2609 hpage_size = HPAGE_PMD_SIZE;
2610 if (shmem_huge != SHMEM_HUGE_FORCE) {
2611 struct super_block *sb;
2612 unsigned long __maybe_unused hpage_orders;
2613 int order = 0;
2614
2615 if (file) {
2616 VM_BUG_ON(file->f_op != &shmem_file_operations);
2617 sb = file_inode(file)->i_sb;
2618 } else {
2619 /*
2620 * Called directly from mm/mmap.c, or drivers/char/mem.c
2621 * for "/dev/zero", to create a shared anonymous object.
2622 */
2623 if (IS_ERR(shm_mnt))
2624 return addr;
2625 sb = shm_mnt->mnt_sb;
2626
2627 /*
2628 * Find the highest mTHP order used for anonymous shmem to
2629 * provide a suitable alignment address.
2630 */
2631#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2632 hpage_orders = READ_ONCE(huge_shmem_orders_always);
2633 hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2634 hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2635 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2636 hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2637
2638 if (hpage_orders > 0) {
2639 order = highest_order(hpage_orders);
2640 hpage_size = PAGE_SIZE << order;
2641 }
2642#endif
2643 }
2644 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2645 return addr;
2646 }
2647
2648 if (len < hpage_size)
2649 return addr;
2650
2651 offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2652 if (offset && offset + len < 2 * hpage_size)
2653 return addr;
2654 if ((addr & (hpage_size - 1)) == offset)
2655 return addr;
2656
2657 inflated_len = len + hpage_size - PAGE_SIZE;
2658 if (inflated_len > TASK_SIZE)
2659 return addr;
2660 if (inflated_len < len)
2661 return addr;
2662
2663 inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
2664 inflated_len, 0, flags);
2665 if (IS_ERR_VALUE(inflated_addr))
2666 return addr;
2667 if (inflated_addr & ~PAGE_MASK)
2668 return addr;
2669
2670 inflated_offset = inflated_addr & (hpage_size - 1);
2671 inflated_addr += offset - inflated_offset;
2672 if (inflated_offset > offset)
2673 inflated_addr += hpage_size;
2674
2675 if (inflated_addr > TASK_SIZE - len)
2676 return addr;
2677 return inflated_addr;
2678}
2679
2680#ifdef CONFIG_NUMA
2681static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2682{
2683 struct inode *inode = file_inode(vma->vm_file);
2684 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2685}
2686
2687static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2688 unsigned long addr, pgoff_t *ilx)
2689{
2690 struct inode *inode = file_inode(vma->vm_file);
2691 pgoff_t index;
2692
2693 /*
2694 * Bias interleave by inode number to distribute better across nodes;
2695 * but this interface is independent of which page order is used, so
2696 * supplies only that bias, letting caller apply the offset (adjusted
2697 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2698 */
2699 *ilx = inode->i_ino;
2700 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2701 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2702}
2703
2704static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2705 pgoff_t index, unsigned int order, pgoff_t *ilx)
2706{
2707 struct mempolicy *mpol;
2708
2709 /* Bias interleave by inode number to distribute better across nodes */
2710 *ilx = info->vfs_inode.i_ino + (index >> order);
2711
2712 mpol = mpol_shared_policy_lookup(&info->policy, index);
2713 return mpol ? mpol : get_task_policy(current);
2714}
2715#else
2716static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2717 pgoff_t index, unsigned int order, pgoff_t *ilx)
2718{
2719 *ilx = 0;
2720 return NULL;
2721}
2722#endif /* CONFIG_NUMA */
2723
2724int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2725{
2726 struct inode *inode = file_inode(file);
2727 struct shmem_inode_info *info = SHMEM_I(inode);
2728 int retval = -ENOMEM;
2729
2730 /*
2731 * What serializes the accesses to info->flags?
2732 * ipc_lock_object() when called from shmctl_do_lock(),
2733 * no serialization needed when called from shm_destroy().
2734 */
2735 if (lock && !(info->flags & VM_LOCKED)) {
2736 if (!user_shm_lock(inode->i_size, ucounts))
2737 goto out_nomem;
2738 info->flags |= VM_LOCKED;
2739 mapping_set_unevictable(file->f_mapping);
2740 }
2741 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2742 user_shm_unlock(inode->i_size, ucounts);
2743 info->flags &= ~VM_LOCKED;
2744 mapping_clear_unevictable(file->f_mapping);
2745 }
2746 retval = 0;
2747
2748out_nomem:
2749 return retval;
2750}
2751
2752static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2753{
2754 struct inode *inode = file_inode(file);
2755 struct shmem_inode_info *info = SHMEM_I(inode);
2756 int ret;
2757
2758 ret = seal_check_write(info->seals, vma);
2759 if (ret)
2760 return ret;
2761
2762 file_accessed(file);
2763 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2764 if (inode->i_nlink)
2765 vma->vm_ops = &shmem_vm_ops;
2766 else
2767 vma->vm_ops = &shmem_anon_vm_ops;
2768 return 0;
2769}
2770
2771static int shmem_file_open(struct inode *inode, struct file *file)
2772{
2773 file->f_mode |= FMODE_CAN_ODIRECT;
2774 return generic_file_open(inode, file);
2775}
2776
2777#ifdef CONFIG_TMPFS_XATTR
2778static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2779
2780#if IS_ENABLED(CONFIG_UNICODE)
2781/*
2782 * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2783 *
2784 * The casefold file attribute needs some special checks. I can just be added to
2785 * an empty dir, and can't be removed from a non-empty dir.
2786 */
2787static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2788 struct dentry *dentry, unsigned int *i_flags)
2789{
2790 unsigned int old = inode->i_flags;
2791 struct super_block *sb = inode->i_sb;
2792
2793 if (fsflags & FS_CASEFOLD_FL) {
2794 if (!(old & S_CASEFOLD)) {
2795 if (!sb->s_encoding)
2796 return -EOPNOTSUPP;
2797
2798 if (!S_ISDIR(inode->i_mode))
2799 return -ENOTDIR;
2800
2801 if (dentry && !simple_empty(dentry))
2802 return -ENOTEMPTY;
2803 }
2804
2805 *i_flags = *i_flags | S_CASEFOLD;
2806 } else if (old & S_CASEFOLD) {
2807 if (dentry && !simple_empty(dentry))
2808 return -ENOTEMPTY;
2809 }
2810
2811 return 0;
2812}
2813#else
2814static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2815 struct dentry *dentry, unsigned int *i_flags)
2816{
2817 if (fsflags & FS_CASEFOLD_FL)
2818 return -EOPNOTSUPP;
2819
2820 return 0;
2821}
2822#endif
2823
2824/*
2825 * chattr's fsflags are unrelated to extended attributes,
2826 * but tmpfs has chosen to enable them under the same config option.
2827 */
2828static int shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
2829{
2830 unsigned int i_flags = 0;
2831 int ret;
2832
2833 ret = shmem_inode_casefold_flags(inode, fsflags, dentry, &i_flags);
2834 if (ret)
2835 return ret;
2836
2837 if (fsflags & FS_NOATIME_FL)
2838 i_flags |= S_NOATIME;
2839 if (fsflags & FS_APPEND_FL)
2840 i_flags |= S_APPEND;
2841 if (fsflags & FS_IMMUTABLE_FL)
2842 i_flags |= S_IMMUTABLE;
2843 /*
2844 * But FS_NODUMP_FL does not require any action in i_flags.
2845 */
2846 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE | S_CASEFOLD);
2847
2848 return 0;
2849}
2850#else
2851static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
2852{
2853}
2854#define shmem_initxattrs NULL
2855#endif
2856
2857static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2858{
2859 return &SHMEM_I(inode)->dir_offsets;
2860}
2861
2862static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2863 struct super_block *sb,
2864 struct inode *dir, umode_t mode,
2865 dev_t dev, unsigned long flags)
2866{
2867 struct inode *inode;
2868 struct shmem_inode_info *info;
2869 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2870 ino_t ino;
2871 int err;
2872
2873 err = shmem_reserve_inode(sb, &ino);
2874 if (err)
2875 return ERR_PTR(err);
2876
2877 inode = new_inode(sb);
2878 if (!inode) {
2879 shmem_free_inode(sb, 0);
2880 return ERR_PTR(-ENOSPC);
2881 }
2882
2883 inode->i_ino = ino;
2884 inode_init_owner(idmap, inode, dir, mode);
2885 inode->i_blocks = 0;
2886 simple_inode_init_ts(inode);
2887 inode->i_generation = get_random_u32();
2888 info = SHMEM_I(inode);
2889 memset(info, 0, (char *)inode - (char *)info);
2890 spin_lock_init(&info->lock);
2891 atomic_set(&info->stop_eviction, 0);
2892 info->seals = F_SEAL_SEAL;
2893 info->flags = flags & VM_NORESERVE;
2894 info->i_crtime = inode_get_mtime(inode);
2895 info->fsflags = (dir == NULL) ? 0 :
2896 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2897 if (info->fsflags)
2898 shmem_set_inode_flags(inode, info->fsflags, NULL);
2899 INIT_LIST_HEAD(&info->shrinklist);
2900 INIT_LIST_HEAD(&info->swaplist);
2901 simple_xattrs_init(&info->xattrs);
2902 cache_no_acl(inode);
2903 if (sbinfo->noswap)
2904 mapping_set_unevictable(inode->i_mapping);
2905
2906 /* Don't consider 'deny' for emergencies and 'force' for testing */
2907 if (sbinfo->huge)
2908 mapping_set_large_folios(inode->i_mapping);
2909
2910 switch (mode & S_IFMT) {
2911 default:
2912 inode->i_op = &shmem_special_inode_operations;
2913 init_special_inode(inode, mode, dev);
2914 break;
2915 case S_IFREG:
2916 inode->i_mapping->a_ops = &shmem_aops;
2917 inode->i_op = &shmem_inode_operations;
2918 inode->i_fop = &shmem_file_operations;
2919 mpol_shared_policy_init(&info->policy,
2920 shmem_get_sbmpol(sbinfo));
2921 break;
2922 case S_IFDIR:
2923 inc_nlink(inode);
2924 /* Some things misbehave if size == 0 on a directory */
2925 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2926 inode->i_op = &shmem_dir_inode_operations;
2927 inode->i_fop = &simple_offset_dir_operations;
2928 simple_offset_init(shmem_get_offset_ctx(inode));
2929 break;
2930 case S_IFLNK:
2931 /*
2932 * Must not load anything in the rbtree,
2933 * mpol_free_shared_policy will not be called.
2934 */
2935 mpol_shared_policy_init(&info->policy, NULL);
2936 break;
2937 }
2938
2939 lockdep_annotate_inode_mutex_key(inode);
2940 return inode;
2941}
2942
2943#ifdef CONFIG_TMPFS_QUOTA
2944static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2945 struct super_block *sb, struct inode *dir,
2946 umode_t mode, dev_t dev, unsigned long flags)
2947{
2948 int err;
2949 struct inode *inode;
2950
2951 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2952 if (IS_ERR(inode))
2953 return inode;
2954
2955 err = dquot_initialize(inode);
2956 if (err)
2957 goto errout;
2958
2959 err = dquot_alloc_inode(inode);
2960 if (err) {
2961 dquot_drop(inode);
2962 goto errout;
2963 }
2964 return inode;
2965
2966errout:
2967 inode->i_flags |= S_NOQUOTA;
2968 iput(inode);
2969 return ERR_PTR(err);
2970}
2971#else
2972static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2973 struct super_block *sb, struct inode *dir,
2974 umode_t mode, dev_t dev, unsigned long flags)
2975{
2976 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2977}
2978#endif /* CONFIG_TMPFS_QUOTA */
2979
2980#ifdef CONFIG_USERFAULTFD
2981int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2982 struct vm_area_struct *dst_vma,
2983 unsigned long dst_addr,
2984 unsigned long src_addr,
2985 uffd_flags_t flags,
2986 struct folio **foliop)
2987{
2988 struct inode *inode = file_inode(dst_vma->vm_file);
2989 struct shmem_inode_info *info = SHMEM_I(inode);
2990 struct address_space *mapping = inode->i_mapping;
2991 gfp_t gfp = mapping_gfp_mask(mapping);
2992 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2993 void *page_kaddr;
2994 struct folio *folio;
2995 int ret;
2996 pgoff_t max_off;
2997
2998 if (shmem_inode_acct_blocks(inode, 1)) {
2999 /*
3000 * We may have got a page, returned -ENOENT triggering a retry,
3001 * and now we find ourselves with -ENOMEM. Release the page, to
3002 * avoid a BUG_ON in our caller.
3003 */
3004 if (unlikely(*foliop)) {
3005 folio_put(*foliop);
3006 *foliop = NULL;
3007 }
3008 return -ENOMEM;
3009 }
3010
3011 if (!*foliop) {
3012 ret = -ENOMEM;
3013 folio = shmem_alloc_folio(gfp, 0, info, pgoff);
3014 if (!folio)
3015 goto out_unacct_blocks;
3016
3017 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
3018 page_kaddr = kmap_local_folio(folio, 0);
3019 /*
3020 * The read mmap_lock is held here. Despite the
3021 * mmap_lock being read recursive a deadlock is still
3022 * possible if a writer has taken a lock. For example:
3023 *
3024 * process A thread 1 takes read lock on own mmap_lock
3025 * process A thread 2 calls mmap, blocks taking write lock
3026 * process B thread 1 takes page fault, read lock on own mmap lock
3027 * process B thread 2 calls mmap, blocks taking write lock
3028 * process A thread 1 blocks taking read lock on process B
3029 * process B thread 1 blocks taking read lock on process A
3030 *
3031 * Disable page faults to prevent potential deadlock
3032 * and retry the copy outside the mmap_lock.
3033 */
3034 pagefault_disable();
3035 ret = copy_from_user(page_kaddr,
3036 (const void __user *)src_addr,
3037 PAGE_SIZE);
3038 pagefault_enable();
3039 kunmap_local(page_kaddr);
3040
3041 /* fallback to copy_from_user outside mmap_lock */
3042 if (unlikely(ret)) {
3043 *foliop = folio;
3044 ret = -ENOENT;
3045 /* don't free the page */
3046 goto out_unacct_blocks;
3047 }
3048
3049 flush_dcache_folio(folio);
3050 } else { /* ZEROPAGE */
3051 clear_user_highpage(&folio->page, dst_addr);
3052 }
3053 } else {
3054 folio = *foliop;
3055 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
3056 *foliop = NULL;
3057 }
3058
3059 VM_BUG_ON(folio_test_locked(folio));
3060 VM_BUG_ON(folio_test_swapbacked(folio));
3061 __folio_set_locked(folio);
3062 __folio_set_swapbacked(folio);
3063 __folio_mark_uptodate(folio);
3064
3065 ret = -EFAULT;
3066 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3067 if (unlikely(pgoff >= max_off))
3068 goto out_release;
3069
3070 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
3071 if (ret)
3072 goto out_release;
3073 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
3074 if (ret)
3075 goto out_release;
3076
3077 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
3078 &folio->page, true, flags);
3079 if (ret)
3080 goto out_delete_from_cache;
3081
3082 shmem_recalc_inode(inode, 1, 0);
3083 folio_unlock(folio);
3084 return 0;
3085out_delete_from_cache:
3086 filemap_remove_folio(folio);
3087out_release:
3088 folio_unlock(folio);
3089 folio_put(folio);
3090out_unacct_blocks:
3091 shmem_inode_unacct_blocks(inode, 1);
3092 return ret;
3093}
3094#endif /* CONFIG_USERFAULTFD */
3095
3096#ifdef CONFIG_TMPFS
3097static const struct inode_operations shmem_symlink_inode_operations;
3098static const struct inode_operations shmem_short_symlink_operations;
3099
3100static int
3101shmem_write_begin(struct file *file, struct address_space *mapping,
3102 loff_t pos, unsigned len,
3103 struct folio **foliop, void **fsdata)
3104{
3105 struct inode *inode = mapping->host;
3106 struct shmem_inode_info *info = SHMEM_I(inode);
3107 pgoff_t index = pos >> PAGE_SHIFT;
3108 struct folio *folio;
3109 int ret = 0;
3110
3111 /* i_rwsem is held by caller */
3112 if (unlikely(info->seals & (F_SEAL_GROW |
3113 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3114 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3115 return -EPERM;
3116 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3117 return -EPERM;
3118 }
3119
3120 ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3121 if (ret)
3122 return ret;
3123
3124 if (folio_test_hwpoison(folio) ||
3125 (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
3126 folio_unlock(folio);
3127 folio_put(folio);
3128 return -EIO;
3129 }
3130
3131 *foliop = folio;
3132 return 0;
3133}
3134
3135static int
3136shmem_write_end(struct file *file, struct address_space *mapping,
3137 loff_t pos, unsigned len, unsigned copied,
3138 struct folio *folio, void *fsdata)
3139{
3140 struct inode *inode = mapping->host;
3141
3142 if (pos + copied > inode->i_size)
3143 i_size_write(inode, pos + copied);
3144
3145 if (!folio_test_uptodate(folio)) {
3146 if (copied < folio_size(folio)) {
3147 size_t from = offset_in_folio(folio, pos);
3148 folio_zero_segments(folio, 0, from,
3149 from + copied, folio_size(folio));
3150 }
3151 folio_mark_uptodate(folio);
3152 }
3153 folio_mark_dirty(folio);
3154 folio_unlock(folio);
3155 folio_put(folio);
3156
3157 return copied;
3158}
3159
3160static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3161{
3162 struct file *file = iocb->ki_filp;
3163 struct inode *inode = file_inode(file);
3164 struct address_space *mapping = inode->i_mapping;
3165 pgoff_t index;
3166 unsigned long offset;
3167 int error = 0;
3168 ssize_t retval = 0;
3169
3170 for (;;) {
3171 struct folio *folio = NULL;
3172 struct page *page = NULL;
3173 unsigned long nr, ret;
3174 loff_t end_offset, i_size = i_size_read(inode);
3175 bool fallback_page_copy = false;
3176 size_t fsize;
3177
3178 if (unlikely(iocb->ki_pos >= i_size))
3179 break;
3180
3181 index = iocb->ki_pos >> PAGE_SHIFT;
3182 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3183 if (error) {
3184 if (error == -EINVAL)
3185 error = 0;
3186 break;
3187 }
3188 if (folio) {
3189 folio_unlock(folio);
3190
3191 page = folio_file_page(folio, index);
3192 if (PageHWPoison(page)) {
3193 folio_put(folio);
3194 error = -EIO;
3195 break;
3196 }
3197
3198 if (folio_test_large(folio) &&
3199 folio_test_has_hwpoisoned(folio))
3200 fallback_page_copy = true;
3201 }
3202
3203 /*
3204 * We must evaluate after, since reads (unlike writes)
3205 * are called without i_rwsem protection against truncate
3206 */
3207 i_size = i_size_read(inode);
3208 if (unlikely(iocb->ki_pos >= i_size)) {
3209 if (folio)
3210 folio_put(folio);
3211 break;
3212 }
3213 end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
3214 if (folio && likely(!fallback_page_copy))
3215 fsize = folio_size(folio);
3216 else
3217 fsize = PAGE_SIZE;
3218 offset = iocb->ki_pos & (fsize - 1);
3219 nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset);
3220
3221 if (folio) {
3222 /*
3223 * If users can be writing to this page using arbitrary
3224 * virtual addresses, take care about potential aliasing
3225 * before reading the page on the kernel side.
3226 */
3227 if (mapping_writably_mapped(mapping)) {
3228 if (likely(!fallback_page_copy))
3229 flush_dcache_folio(folio);
3230 else
3231 flush_dcache_page(page);
3232 }
3233
3234 /*
3235 * Mark the folio accessed if we read the beginning.
3236 */
3237 if (!offset)
3238 folio_mark_accessed(folio);
3239 /*
3240 * Ok, we have the page, and it's up-to-date, so
3241 * now we can copy it to user space...
3242 */
3243 if (likely(!fallback_page_copy))
3244 ret = copy_folio_to_iter(folio, offset, nr, to);
3245 else
3246 ret = copy_page_to_iter(page, offset, nr, to);
3247 folio_put(folio);
3248 } else if (user_backed_iter(to)) {
3249 /*
3250 * Copy to user tends to be so well optimized, but
3251 * clear_user() not so much, that it is noticeably
3252 * faster to copy the zero page instead of clearing.
3253 */
3254 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3255 } else {
3256 /*
3257 * But submitting the same page twice in a row to
3258 * splice() - or others? - can result in confusion:
3259 * so don't attempt that optimization on pipes etc.
3260 */
3261 ret = iov_iter_zero(nr, to);
3262 }
3263
3264 retval += ret;
3265 iocb->ki_pos += ret;
3266
3267 if (!iov_iter_count(to))
3268 break;
3269 if (ret < nr) {
3270 error = -EFAULT;
3271 break;
3272 }
3273 cond_resched();
3274 }
3275
3276 file_accessed(file);
3277 return retval ? retval : error;
3278}
3279
3280static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3281{
3282 struct file *file = iocb->ki_filp;
3283 struct inode *inode = file->f_mapping->host;
3284 ssize_t ret;
3285
3286 inode_lock(inode);
3287 ret = generic_write_checks(iocb, from);
3288 if (ret <= 0)
3289 goto unlock;
3290 ret = file_remove_privs(file);
3291 if (ret)
3292 goto unlock;
3293 ret = file_update_time(file);
3294 if (ret)
3295 goto unlock;
3296 ret = generic_perform_write(iocb, from);
3297unlock:
3298 inode_unlock(inode);
3299 return ret;
3300}
3301
3302static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3303 struct pipe_buffer *buf)
3304{
3305 return true;
3306}
3307
3308static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3309 struct pipe_buffer *buf)
3310{
3311}
3312
3313static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3314 struct pipe_buffer *buf)
3315{
3316 return false;
3317}
3318
3319static const struct pipe_buf_operations zero_pipe_buf_ops = {
3320 .release = zero_pipe_buf_release,
3321 .try_steal = zero_pipe_buf_try_steal,
3322 .get = zero_pipe_buf_get,
3323};
3324
3325static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3326 loff_t fpos, size_t size)
3327{
3328 size_t offset = fpos & ~PAGE_MASK;
3329
3330 size = min_t(size_t, size, PAGE_SIZE - offset);
3331
3332 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
3333 struct pipe_buffer *buf = pipe_head_buf(pipe);
3334
3335 *buf = (struct pipe_buffer) {
3336 .ops = &zero_pipe_buf_ops,
3337 .page = ZERO_PAGE(0),
3338 .offset = offset,
3339 .len = size,
3340 };
3341 pipe->head++;
3342 }
3343
3344 return size;
3345}
3346
3347static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3348 struct pipe_inode_info *pipe,
3349 size_t len, unsigned int flags)
3350{
3351 struct inode *inode = file_inode(in);
3352 struct address_space *mapping = inode->i_mapping;
3353 struct folio *folio = NULL;
3354 size_t total_spliced = 0, used, npages, n, part;
3355 loff_t isize;
3356 int error = 0;
3357
3358 /* Work out how much data we can actually add into the pipe */
3359 used = pipe_occupancy(pipe->head, pipe->tail);
3360 npages = max_t(ssize_t, pipe->max_usage - used, 0);
3361 len = min_t(size_t, len, npages * PAGE_SIZE);
3362
3363 do {
3364 bool fallback_page_splice = false;
3365 struct page *page = NULL;
3366 pgoff_t index;
3367 size_t size;
3368
3369 if (*ppos >= i_size_read(inode))
3370 break;
3371
3372 index = *ppos >> PAGE_SHIFT;
3373 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3374 if (error) {
3375 if (error == -EINVAL)
3376 error = 0;
3377 break;
3378 }
3379 if (folio) {
3380 folio_unlock(folio);
3381
3382 page = folio_file_page(folio, index);
3383 if (PageHWPoison(page)) {
3384 error = -EIO;
3385 break;
3386 }
3387
3388 if (folio_test_large(folio) &&
3389 folio_test_has_hwpoisoned(folio))
3390 fallback_page_splice = true;
3391 }
3392
3393 /*
3394 * i_size must be checked after we know the pages are Uptodate.
3395 *
3396 * Checking i_size after the check allows us to calculate
3397 * the correct value for "nr", which means the zero-filled
3398 * part of the page is not copied back to userspace (unless
3399 * another truncate extends the file - this is desired though).
3400 */
3401 isize = i_size_read(inode);
3402 if (unlikely(*ppos >= isize))
3403 break;
3404 /*
3405 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
3406 * pages.
3407 */
3408 size = len;
3409 if (unlikely(fallback_page_splice)) {
3410 size_t offset = *ppos & ~PAGE_MASK;
3411
3412 size = umin(size, PAGE_SIZE - offset);
3413 }
3414 part = min_t(loff_t, isize - *ppos, size);
3415
3416 if (folio) {
3417 /*
3418 * If users can be writing to this page using arbitrary
3419 * virtual addresses, take care about potential aliasing
3420 * before reading the page on the kernel side.
3421 */
3422 if (mapping_writably_mapped(mapping)) {
3423 if (likely(!fallback_page_splice))
3424 flush_dcache_folio(folio);
3425 else
3426 flush_dcache_page(page);
3427 }
3428 folio_mark_accessed(folio);
3429 /*
3430 * Ok, we have the page, and it's up-to-date, so we can
3431 * now splice it into the pipe.
3432 */
3433 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3434 folio_put(folio);
3435 folio = NULL;
3436 } else {
3437 n = splice_zeropage_into_pipe(pipe, *ppos, part);
3438 }
3439
3440 if (!n)
3441 break;
3442 len -= n;
3443 total_spliced += n;
3444 *ppos += n;
3445 in->f_ra.prev_pos = *ppos;
3446 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3447 break;
3448
3449 cond_resched();
3450 } while (len);
3451
3452 if (folio)
3453 folio_put(folio);
3454
3455 file_accessed(in);
3456 return total_spliced ? total_spliced : error;
3457}
3458
3459static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3460{
3461 struct address_space *mapping = file->f_mapping;
3462 struct inode *inode = mapping->host;
3463
3464 if (whence != SEEK_DATA && whence != SEEK_HOLE)
3465 return generic_file_llseek_size(file, offset, whence,
3466 MAX_LFS_FILESIZE, i_size_read(inode));
3467 if (offset < 0)
3468 return -ENXIO;
3469
3470 inode_lock(inode);
3471 /* We're holding i_rwsem so we can access i_size directly */
3472 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3473 if (offset >= 0)
3474 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3475 inode_unlock(inode);
3476 return offset;
3477}
3478
3479static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3480 loff_t len)
3481{
3482 struct inode *inode = file_inode(file);
3483 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3484 struct shmem_inode_info *info = SHMEM_I(inode);
3485 struct shmem_falloc shmem_falloc;
3486 pgoff_t start, index, end, undo_fallocend;
3487 int error;
3488
3489 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3490 return -EOPNOTSUPP;
3491
3492 inode_lock(inode);
3493
3494 if (mode & FALLOC_FL_PUNCH_HOLE) {
3495 struct address_space *mapping = file->f_mapping;
3496 loff_t unmap_start = round_up(offset, PAGE_SIZE);
3497 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3498 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3499
3500 /* protected by i_rwsem */
3501 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3502 error = -EPERM;
3503 goto out;
3504 }
3505
3506 shmem_falloc.waitq = &shmem_falloc_waitq;
3507 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3508 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3509 spin_lock(&inode->i_lock);
3510 inode->i_private = &shmem_falloc;
3511 spin_unlock(&inode->i_lock);
3512
3513 if ((u64)unmap_end > (u64)unmap_start)
3514 unmap_mapping_range(mapping, unmap_start,
3515 1 + unmap_end - unmap_start, 0);
3516 shmem_truncate_range(inode, offset, offset + len - 1);
3517 /* No need to unmap again: hole-punching leaves COWed pages */
3518
3519 spin_lock(&inode->i_lock);
3520 inode->i_private = NULL;
3521 wake_up_all(&shmem_falloc_waitq);
3522 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3523 spin_unlock(&inode->i_lock);
3524 error = 0;
3525 goto out;
3526 }
3527
3528 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3529 error = inode_newsize_ok(inode, offset + len);
3530 if (error)
3531 goto out;
3532
3533 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3534 error = -EPERM;
3535 goto out;
3536 }
3537
3538 start = offset >> PAGE_SHIFT;
3539 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3540 /* Try to avoid a swapstorm if len is impossible to satisfy */
3541 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3542 error = -ENOSPC;
3543 goto out;
3544 }
3545
3546 shmem_falloc.waitq = NULL;
3547 shmem_falloc.start = start;
3548 shmem_falloc.next = start;
3549 shmem_falloc.nr_falloced = 0;
3550 shmem_falloc.nr_unswapped = 0;
3551 spin_lock(&inode->i_lock);
3552 inode->i_private = &shmem_falloc;
3553 spin_unlock(&inode->i_lock);
3554
3555 /*
3556 * info->fallocend is only relevant when huge pages might be
3557 * involved: to prevent split_huge_page() freeing fallocated
3558 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3559 */
3560 undo_fallocend = info->fallocend;
3561 if (info->fallocend < end)
3562 info->fallocend = end;
3563
3564 for (index = start; index < end; ) {
3565 struct folio *folio;
3566
3567 /*
3568 * Check for fatal signal so that we abort early in OOM
3569 * situations. We don't want to abort in case of non-fatal
3570 * signals as large fallocate can take noticeable time and
3571 * e.g. periodic timers may result in fallocate constantly
3572 * restarting.
3573 */
3574 if (fatal_signal_pending(current))
3575 error = -EINTR;
3576 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3577 error = -ENOMEM;
3578 else
3579 error = shmem_get_folio(inode, index, offset + len,
3580 &folio, SGP_FALLOC);
3581 if (error) {
3582 info->fallocend = undo_fallocend;
3583 /* Remove the !uptodate folios we added */
3584 if (index > start) {
3585 shmem_undo_range(inode,
3586 (loff_t)start << PAGE_SHIFT,
3587 ((loff_t)index << PAGE_SHIFT) - 1, true);
3588 }
3589 goto undone;
3590 }
3591
3592 /*
3593 * Here is a more important optimization than it appears:
3594 * a second SGP_FALLOC on the same large folio will clear it,
3595 * making it uptodate and un-undoable if we fail later.
3596 */
3597 index = folio_next_index(folio);
3598 /* Beware 32-bit wraparound */
3599 if (!index)
3600 index--;
3601
3602 /*
3603 * Inform shmem_writepage() how far we have reached.
3604 * No need for lock or barrier: we have the page lock.
3605 */
3606 if (!folio_test_uptodate(folio))
3607 shmem_falloc.nr_falloced += index - shmem_falloc.next;
3608 shmem_falloc.next = index;
3609
3610 /*
3611 * If !uptodate, leave it that way so that freeable folios
3612 * can be recognized if we need to rollback on error later.
3613 * But mark it dirty so that memory pressure will swap rather
3614 * than free the folios we are allocating (and SGP_CACHE folios
3615 * might still be clean: we now need to mark those dirty too).
3616 */
3617 folio_mark_dirty(folio);
3618 folio_unlock(folio);
3619 folio_put(folio);
3620 cond_resched();
3621 }
3622
3623 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3624 i_size_write(inode, offset + len);
3625undone:
3626 spin_lock(&inode->i_lock);
3627 inode->i_private = NULL;
3628 spin_unlock(&inode->i_lock);
3629out:
3630 if (!error)
3631 file_modified(file);
3632 inode_unlock(inode);
3633 return error;
3634}
3635
3636static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3637{
3638 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3639
3640 buf->f_type = TMPFS_MAGIC;
3641 buf->f_bsize = PAGE_SIZE;
3642 buf->f_namelen = NAME_MAX;
3643 if (sbinfo->max_blocks) {
3644 buf->f_blocks = sbinfo->max_blocks;
3645 buf->f_bavail =
3646 buf->f_bfree = sbinfo->max_blocks -
3647 percpu_counter_sum(&sbinfo->used_blocks);
3648 }
3649 if (sbinfo->max_inodes) {
3650 buf->f_files = sbinfo->max_inodes;
3651 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3652 }
3653 /* else leave those fields 0 like simple_statfs */
3654
3655 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3656
3657 return 0;
3658}
3659
3660/*
3661 * File creation. Allocate an inode, and we're done..
3662 */
3663static int
3664shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3665 struct dentry *dentry, umode_t mode, dev_t dev)
3666{
3667 struct inode *inode;
3668 int error;
3669
3670 if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
3671 return -EINVAL;
3672
3673 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3674 if (IS_ERR(inode))
3675 return PTR_ERR(inode);
3676
3677 error = simple_acl_create(dir, inode);
3678 if (error)
3679 goto out_iput;
3680 error = security_inode_init_security(inode, dir, &dentry->d_name,
3681 shmem_initxattrs, NULL);
3682 if (error && error != -EOPNOTSUPP)
3683 goto out_iput;
3684
3685 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3686 if (error)
3687 goto out_iput;
3688
3689 dir->i_size += BOGO_DIRENT_SIZE;
3690 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3691 inode_inc_iversion(dir);
3692
3693 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3694 d_add(dentry, inode);
3695 else
3696 d_instantiate(dentry, inode);
3697
3698 dget(dentry); /* Extra count - pin the dentry in core */
3699 return error;
3700
3701out_iput:
3702 iput(inode);
3703 return error;
3704}
3705
3706static int
3707shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3708 struct file *file, umode_t mode)
3709{
3710 struct inode *inode;
3711 int error;
3712
3713 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3714 if (IS_ERR(inode)) {
3715 error = PTR_ERR(inode);
3716 goto err_out;
3717 }
3718 error = security_inode_init_security(inode, dir, NULL,
3719 shmem_initxattrs, NULL);
3720 if (error && error != -EOPNOTSUPP)
3721 goto out_iput;
3722 error = simple_acl_create(dir, inode);
3723 if (error)
3724 goto out_iput;
3725 d_tmpfile(file, inode);
3726
3727err_out:
3728 return finish_open_simple(file, error);
3729out_iput:
3730 iput(inode);
3731 return error;
3732}
3733
3734static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3735 struct dentry *dentry, umode_t mode)
3736{
3737 int error;
3738
3739 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3740 if (error)
3741 return error;
3742 inc_nlink(dir);
3743 return 0;
3744}
3745
3746static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3747 struct dentry *dentry, umode_t mode, bool excl)
3748{
3749 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3750}
3751
3752/*
3753 * Link a file..
3754 */
3755static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3756 struct dentry *dentry)
3757{
3758 struct inode *inode = d_inode(old_dentry);
3759 int ret = 0;
3760
3761 /*
3762 * No ordinary (disk based) filesystem counts links as inodes;
3763 * but each new link needs a new dentry, pinning lowmem, and
3764 * tmpfs dentries cannot be pruned until they are unlinked.
3765 * But if an O_TMPFILE file is linked into the tmpfs, the
3766 * first link must skip that, to get the accounting right.
3767 */
3768 if (inode->i_nlink) {
3769 ret = shmem_reserve_inode(inode->i_sb, NULL);
3770 if (ret)
3771 goto out;
3772 }
3773
3774 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3775 if (ret) {
3776 if (inode->i_nlink)
3777 shmem_free_inode(inode->i_sb, 0);
3778 goto out;
3779 }
3780
3781 dir->i_size += BOGO_DIRENT_SIZE;
3782 inode_set_mtime_to_ts(dir,
3783 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3784 inode_inc_iversion(dir);
3785 inc_nlink(inode);
3786 ihold(inode); /* New dentry reference */
3787 dget(dentry); /* Extra pinning count for the created dentry */
3788 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3789 d_add(dentry, inode);
3790 else
3791 d_instantiate(dentry, inode);
3792out:
3793 return ret;
3794}
3795
3796static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3797{
3798 struct inode *inode = d_inode(dentry);
3799
3800 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3801 shmem_free_inode(inode->i_sb, 0);
3802
3803 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3804
3805 dir->i_size -= BOGO_DIRENT_SIZE;
3806 inode_set_mtime_to_ts(dir,
3807 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3808 inode_inc_iversion(dir);
3809 drop_nlink(inode);
3810 dput(dentry); /* Undo the count from "create" - does all the work */
3811
3812 /*
3813 * For now, VFS can't deal with case-insensitive negative dentries, so
3814 * we invalidate them
3815 */
3816 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3817 d_invalidate(dentry);
3818
3819 return 0;
3820}
3821
3822static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3823{
3824 if (!simple_empty(dentry))
3825 return -ENOTEMPTY;
3826
3827 drop_nlink(d_inode(dentry));
3828 drop_nlink(dir);
3829 return shmem_unlink(dir, dentry);
3830}
3831
3832static int shmem_whiteout(struct mnt_idmap *idmap,
3833 struct inode *old_dir, struct dentry *old_dentry)
3834{
3835 struct dentry *whiteout;
3836 int error;
3837
3838 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3839 if (!whiteout)
3840 return -ENOMEM;
3841
3842 error = shmem_mknod(idmap, old_dir, whiteout,
3843 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3844 dput(whiteout);
3845 if (error)
3846 return error;
3847
3848 /*
3849 * Cheat and hash the whiteout while the old dentry is still in
3850 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3851 *
3852 * d_lookup() will consistently find one of them at this point,
3853 * not sure which one, but that isn't even important.
3854 */
3855 d_rehash(whiteout);
3856 return 0;
3857}
3858
3859/*
3860 * The VFS layer already does all the dentry stuff for rename,
3861 * we just have to decrement the usage count for the target if
3862 * it exists so that the VFS layer correctly free's it when it
3863 * gets overwritten.
3864 */
3865static int shmem_rename2(struct mnt_idmap *idmap,
3866 struct inode *old_dir, struct dentry *old_dentry,
3867 struct inode *new_dir, struct dentry *new_dentry,
3868 unsigned int flags)
3869{
3870 struct inode *inode = d_inode(old_dentry);
3871 int they_are_dirs = S_ISDIR(inode->i_mode);
3872 int error;
3873
3874 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3875 return -EINVAL;
3876
3877 if (flags & RENAME_EXCHANGE)
3878 return simple_offset_rename_exchange(old_dir, old_dentry,
3879 new_dir, new_dentry);
3880
3881 if (!simple_empty(new_dentry))
3882 return -ENOTEMPTY;
3883
3884 if (flags & RENAME_WHITEOUT) {
3885 error = shmem_whiteout(idmap, old_dir, old_dentry);
3886 if (error)
3887 return error;
3888 }
3889
3890 error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
3891 if (error)
3892 return error;
3893
3894 if (d_really_is_positive(new_dentry)) {
3895 (void) shmem_unlink(new_dir, new_dentry);
3896 if (they_are_dirs) {
3897 drop_nlink(d_inode(new_dentry));
3898 drop_nlink(old_dir);
3899 }
3900 } else if (they_are_dirs) {
3901 drop_nlink(old_dir);
3902 inc_nlink(new_dir);
3903 }
3904
3905 old_dir->i_size -= BOGO_DIRENT_SIZE;
3906 new_dir->i_size += BOGO_DIRENT_SIZE;
3907 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
3908 inode_inc_iversion(old_dir);
3909 inode_inc_iversion(new_dir);
3910 return 0;
3911}
3912
3913static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3914 struct dentry *dentry, const char *symname)
3915{
3916 int error;
3917 int len;
3918 struct inode *inode;
3919 struct folio *folio;
3920
3921 len = strlen(symname) + 1;
3922 if (len > PAGE_SIZE)
3923 return -ENAMETOOLONG;
3924
3925 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3926 VM_NORESERVE);
3927 if (IS_ERR(inode))
3928 return PTR_ERR(inode);
3929
3930 error = security_inode_init_security(inode, dir, &dentry->d_name,
3931 shmem_initxattrs, NULL);
3932 if (error && error != -EOPNOTSUPP)
3933 goto out_iput;
3934
3935 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3936 if (error)
3937 goto out_iput;
3938
3939 inode->i_size = len-1;
3940 if (len <= SHORT_SYMLINK_LEN) {
3941 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3942 if (!inode->i_link) {
3943 error = -ENOMEM;
3944 goto out_remove_offset;
3945 }
3946 inode->i_op = &shmem_short_symlink_operations;
3947 } else {
3948 inode_nohighmem(inode);
3949 inode->i_mapping->a_ops = &shmem_aops;
3950 error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
3951 if (error)
3952 goto out_remove_offset;
3953 inode->i_op = &shmem_symlink_inode_operations;
3954 memcpy(folio_address(folio), symname, len);
3955 folio_mark_uptodate(folio);
3956 folio_mark_dirty(folio);
3957 folio_unlock(folio);
3958 folio_put(folio);
3959 }
3960 dir->i_size += BOGO_DIRENT_SIZE;
3961 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3962 inode_inc_iversion(dir);
3963 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3964 d_add(dentry, inode);
3965 else
3966 d_instantiate(dentry, inode);
3967 dget(dentry);
3968 return 0;
3969
3970out_remove_offset:
3971 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3972out_iput:
3973 iput(inode);
3974 return error;
3975}
3976
3977static void shmem_put_link(void *arg)
3978{
3979 folio_mark_accessed(arg);
3980 folio_put(arg);
3981}
3982
3983static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3984 struct delayed_call *done)
3985{
3986 struct folio *folio = NULL;
3987 int error;
3988
3989 if (!dentry) {
3990 folio = filemap_get_folio(inode->i_mapping, 0);
3991 if (IS_ERR(folio))
3992 return ERR_PTR(-ECHILD);
3993 if (PageHWPoison(folio_page(folio, 0)) ||
3994 !folio_test_uptodate(folio)) {
3995 folio_put(folio);
3996 return ERR_PTR(-ECHILD);
3997 }
3998 } else {
3999 error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
4000 if (error)
4001 return ERR_PTR(error);
4002 if (!folio)
4003 return ERR_PTR(-ECHILD);
4004 if (PageHWPoison(folio_page(folio, 0))) {
4005 folio_unlock(folio);
4006 folio_put(folio);
4007 return ERR_PTR(-ECHILD);
4008 }
4009 folio_unlock(folio);
4010 }
4011 set_delayed_call(done, shmem_put_link, folio);
4012 return folio_address(folio);
4013}
4014
4015#ifdef CONFIG_TMPFS_XATTR
4016
4017static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
4018{
4019 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4020
4021 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
4022
4023 return 0;
4024}
4025
4026static int shmem_fileattr_set(struct mnt_idmap *idmap,
4027 struct dentry *dentry, struct fileattr *fa)
4028{
4029 struct inode *inode = d_inode(dentry);
4030 struct shmem_inode_info *info = SHMEM_I(inode);
4031 int ret, flags;
4032
4033 if (fileattr_has_fsx(fa))
4034 return -EOPNOTSUPP;
4035 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
4036 return -EOPNOTSUPP;
4037
4038 flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
4039 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
4040
4041 ret = shmem_set_inode_flags(inode, flags, dentry);
4042
4043 if (ret)
4044 return ret;
4045
4046 info->fsflags = flags;
4047
4048 inode_set_ctime_current(inode);
4049 inode_inc_iversion(inode);
4050 return 0;
4051}
4052
4053/*
4054 * Superblocks without xattr inode operations may get some security.* xattr
4055 * support from the LSM "for free". As soon as we have any other xattrs
4056 * like ACLs, we also need to implement the security.* handlers at
4057 * filesystem level, though.
4058 */
4059
4060/*
4061 * Callback for security_inode_init_security() for acquiring xattrs.
4062 */
4063static int shmem_initxattrs(struct inode *inode,
4064 const struct xattr *xattr_array, void *fs_info)
4065{
4066 struct shmem_inode_info *info = SHMEM_I(inode);
4067 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4068 const struct xattr *xattr;
4069 struct simple_xattr *new_xattr;
4070 size_t ispace = 0;
4071 size_t len;
4072
4073 if (sbinfo->max_inodes) {
4074 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4075 ispace += simple_xattr_space(xattr->name,
4076 xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
4077 }
4078 if (ispace) {
4079 raw_spin_lock(&sbinfo->stat_lock);
4080 if (sbinfo->free_ispace < ispace)
4081 ispace = 0;
4082 else
4083 sbinfo->free_ispace -= ispace;
4084 raw_spin_unlock(&sbinfo->stat_lock);
4085 if (!ispace)
4086 return -ENOSPC;
4087 }
4088 }
4089
4090 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4091 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
4092 if (!new_xattr)
4093 break;
4094
4095 len = strlen(xattr->name) + 1;
4096 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
4097 GFP_KERNEL_ACCOUNT);
4098 if (!new_xattr->name) {
4099 kvfree(new_xattr);
4100 break;
4101 }
4102
4103 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
4104 XATTR_SECURITY_PREFIX_LEN);
4105 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
4106 xattr->name, len);
4107
4108 simple_xattr_add(&info->xattrs, new_xattr);
4109 }
4110
4111 if (xattr->name != NULL) {
4112 if (ispace) {
4113 raw_spin_lock(&sbinfo->stat_lock);
4114 sbinfo->free_ispace += ispace;
4115 raw_spin_unlock(&sbinfo->stat_lock);
4116 }
4117 simple_xattrs_free(&info->xattrs, NULL);
4118 return -ENOMEM;
4119 }
4120
4121 return 0;
4122}
4123
4124static int shmem_xattr_handler_get(const struct xattr_handler *handler,
4125 struct dentry *unused, struct inode *inode,
4126 const char *name, void *buffer, size_t size)
4127{
4128 struct shmem_inode_info *info = SHMEM_I(inode);
4129
4130 name = xattr_full_name(handler, name);
4131 return simple_xattr_get(&info->xattrs, name, buffer, size);
4132}
4133
4134static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4135 struct mnt_idmap *idmap,
4136 struct dentry *unused, struct inode *inode,
4137 const char *name, const void *value,
4138 size_t size, int flags)
4139{
4140 struct shmem_inode_info *info = SHMEM_I(inode);
4141 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4142 struct simple_xattr *old_xattr;
4143 size_t ispace = 0;
4144
4145 name = xattr_full_name(handler, name);
4146 if (value && sbinfo->max_inodes) {
4147 ispace = simple_xattr_space(name, size);
4148 raw_spin_lock(&sbinfo->stat_lock);
4149 if (sbinfo->free_ispace < ispace)
4150 ispace = 0;
4151 else
4152 sbinfo->free_ispace -= ispace;
4153 raw_spin_unlock(&sbinfo->stat_lock);
4154 if (!ispace)
4155 return -ENOSPC;
4156 }
4157
4158 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4159 if (!IS_ERR(old_xattr)) {
4160 ispace = 0;
4161 if (old_xattr && sbinfo->max_inodes)
4162 ispace = simple_xattr_space(old_xattr->name,
4163 old_xattr->size);
4164 simple_xattr_free(old_xattr);
4165 old_xattr = NULL;
4166 inode_set_ctime_current(inode);
4167 inode_inc_iversion(inode);
4168 }
4169 if (ispace) {
4170 raw_spin_lock(&sbinfo->stat_lock);
4171 sbinfo->free_ispace += ispace;
4172 raw_spin_unlock(&sbinfo->stat_lock);
4173 }
4174 return PTR_ERR(old_xattr);
4175}
4176
4177static const struct xattr_handler shmem_security_xattr_handler = {
4178 .prefix = XATTR_SECURITY_PREFIX,
4179 .get = shmem_xattr_handler_get,
4180 .set = shmem_xattr_handler_set,
4181};
4182
4183static const struct xattr_handler shmem_trusted_xattr_handler = {
4184 .prefix = XATTR_TRUSTED_PREFIX,
4185 .get = shmem_xattr_handler_get,
4186 .set = shmem_xattr_handler_set,
4187};
4188
4189static const struct xattr_handler shmem_user_xattr_handler = {
4190 .prefix = XATTR_USER_PREFIX,
4191 .get = shmem_xattr_handler_get,
4192 .set = shmem_xattr_handler_set,
4193};
4194
4195static const struct xattr_handler * const shmem_xattr_handlers[] = {
4196 &shmem_security_xattr_handler,
4197 &shmem_trusted_xattr_handler,
4198 &shmem_user_xattr_handler,
4199 NULL
4200};
4201
4202static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4203{
4204 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4205 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4206}
4207#endif /* CONFIG_TMPFS_XATTR */
4208
4209static const struct inode_operations shmem_short_symlink_operations = {
4210 .getattr = shmem_getattr,
4211 .setattr = shmem_setattr,
4212 .get_link = simple_get_link,
4213#ifdef CONFIG_TMPFS_XATTR
4214 .listxattr = shmem_listxattr,
4215#endif
4216};
4217
4218static const struct inode_operations shmem_symlink_inode_operations = {
4219 .getattr = shmem_getattr,
4220 .setattr = shmem_setattr,
4221 .get_link = shmem_get_link,
4222#ifdef CONFIG_TMPFS_XATTR
4223 .listxattr = shmem_listxattr,
4224#endif
4225};
4226
4227static struct dentry *shmem_get_parent(struct dentry *child)
4228{
4229 return ERR_PTR(-ESTALE);
4230}
4231
4232static int shmem_match(struct inode *ino, void *vfh)
4233{
4234 __u32 *fh = vfh;
4235 __u64 inum = fh[2];
4236 inum = (inum << 32) | fh[1];
4237 return ino->i_ino == inum && fh[0] == ino->i_generation;
4238}
4239
4240/* Find any alias of inode, but prefer a hashed alias */
4241static struct dentry *shmem_find_alias(struct inode *inode)
4242{
4243 struct dentry *alias = d_find_alias(inode);
4244
4245 return alias ?: d_find_any_alias(inode);
4246}
4247
4248static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4249 struct fid *fid, int fh_len, int fh_type)
4250{
4251 struct inode *inode;
4252 struct dentry *dentry = NULL;
4253 u64 inum;
4254
4255 if (fh_len < 3)
4256 return NULL;
4257
4258 inum = fid->raw[2];
4259 inum = (inum << 32) | fid->raw[1];
4260
4261 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4262 shmem_match, fid->raw);
4263 if (inode) {
4264 dentry = shmem_find_alias(inode);
4265 iput(inode);
4266 }
4267
4268 return dentry;
4269}
4270
4271static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4272 struct inode *parent)
4273{
4274 if (*len < 3) {
4275 *len = 3;
4276 return FILEID_INVALID;
4277 }
4278
4279 if (inode_unhashed(inode)) {
4280 /* Unfortunately insert_inode_hash is not idempotent,
4281 * so as we hash inodes here rather than at creation
4282 * time, we need a lock to ensure we only try
4283 * to do it once
4284 */
4285 static DEFINE_SPINLOCK(lock);
4286 spin_lock(&lock);
4287 if (inode_unhashed(inode))
4288 __insert_inode_hash(inode,
4289 inode->i_ino + inode->i_generation);
4290 spin_unlock(&lock);
4291 }
4292
4293 fh[0] = inode->i_generation;
4294 fh[1] = inode->i_ino;
4295 fh[2] = ((__u64)inode->i_ino) >> 32;
4296
4297 *len = 3;
4298 return 1;
4299}
4300
4301static const struct export_operations shmem_export_ops = {
4302 .get_parent = shmem_get_parent,
4303 .encode_fh = shmem_encode_fh,
4304 .fh_to_dentry = shmem_fh_to_dentry,
4305};
4306
4307enum shmem_param {
4308 Opt_gid,
4309 Opt_huge,
4310 Opt_mode,
4311 Opt_mpol,
4312 Opt_nr_blocks,
4313 Opt_nr_inodes,
4314 Opt_size,
4315 Opt_uid,
4316 Opt_inode32,
4317 Opt_inode64,
4318 Opt_noswap,
4319 Opt_quota,
4320 Opt_usrquota,
4321 Opt_grpquota,
4322 Opt_usrquota_block_hardlimit,
4323 Opt_usrquota_inode_hardlimit,
4324 Opt_grpquota_block_hardlimit,
4325 Opt_grpquota_inode_hardlimit,
4326 Opt_casefold_version,
4327 Opt_casefold,
4328 Opt_strict_encoding,
4329};
4330
4331static const struct constant_table shmem_param_enums_huge[] = {
4332 {"never", SHMEM_HUGE_NEVER },
4333 {"always", SHMEM_HUGE_ALWAYS },
4334 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
4335 {"advise", SHMEM_HUGE_ADVISE },
4336 {}
4337};
4338
4339const struct fs_parameter_spec shmem_fs_parameters[] = {
4340 fsparam_gid ("gid", Opt_gid),
4341 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
4342 fsparam_u32oct("mode", Opt_mode),
4343 fsparam_string("mpol", Opt_mpol),
4344 fsparam_string("nr_blocks", Opt_nr_blocks),
4345 fsparam_string("nr_inodes", Opt_nr_inodes),
4346 fsparam_string("size", Opt_size),
4347 fsparam_uid ("uid", Opt_uid),
4348 fsparam_flag ("inode32", Opt_inode32),
4349 fsparam_flag ("inode64", Opt_inode64),
4350 fsparam_flag ("noswap", Opt_noswap),
4351#ifdef CONFIG_TMPFS_QUOTA
4352 fsparam_flag ("quota", Opt_quota),
4353 fsparam_flag ("usrquota", Opt_usrquota),
4354 fsparam_flag ("grpquota", Opt_grpquota),
4355 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4356 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4357 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4358 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4359#endif
4360 fsparam_string("casefold", Opt_casefold_version),
4361 fsparam_flag ("casefold", Opt_casefold),
4362 fsparam_flag ("strict_encoding", Opt_strict_encoding),
4363 {}
4364};
4365
4366#if IS_ENABLED(CONFIG_UNICODE)
4367static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4368 bool latest_version)
4369{
4370 struct shmem_options *ctx = fc->fs_private;
4371 int version = UTF8_LATEST;
4372 struct unicode_map *encoding;
4373 char *version_str = param->string + 5;
4374
4375 if (!latest_version) {
4376 if (strncmp(param->string, "utf8-", 5))
4377 return invalfc(fc, "Only UTF-8 encodings are supported "
4378 "in the format: utf8-<version number>");
4379
4380 version = utf8_parse_version(version_str);
4381 if (version < 0)
4382 return invalfc(fc, "Invalid UTF-8 version: %s", version_str);
4383 }
4384
4385 encoding = utf8_load(version);
4386
4387 if (IS_ERR(encoding)) {
4388 return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n",
4389 unicode_major(version), unicode_minor(version),
4390 unicode_rev(version));
4391 }
4392
4393 pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n",
4394 unicode_major(version), unicode_minor(version), unicode_rev(version));
4395
4396 ctx->encoding = encoding;
4397
4398 return 0;
4399}
4400#else
4401static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4402 bool latest_version)
4403{
4404 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4405}
4406#endif
4407
4408static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4409{
4410 struct shmem_options *ctx = fc->fs_private;
4411 struct fs_parse_result result;
4412 unsigned long long size;
4413 char *rest;
4414 int opt;
4415 kuid_t kuid;
4416 kgid_t kgid;
4417
4418 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4419 if (opt < 0)
4420 return opt;
4421
4422 switch (opt) {
4423 case Opt_size:
4424 size = memparse(param->string, &rest);
4425 if (*rest == '%') {
4426 size <<= PAGE_SHIFT;
4427 size *= totalram_pages();
4428 do_div(size, 100);
4429 rest++;
4430 }
4431 if (*rest)
4432 goto bad_value;
4433 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4434 ctx->seen |= SHMEM_SEEN_BLOCKS;
4435 break;
4436 case Opt_nr_blocks:
4437 ctx->blocks = memparse(param->string, &rest);
4438 if (*rest || ctx->blocks > LONG_MAX)
4439 goto bad_value;
4440 ctx->seen |= SHMEM_SEEN_BLOCKS;
4441 break;
4442 case Opt_nr_inodes:
4443 ctx->inodes = memparse(param->string, &rest);
4444 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4445 goto bad_value;
4446 ctx->seen |= SHMEM_SEEN_INODES;
4447 break;
4448 case Opt_mode:
4449 ctx->mode = result.uint_32 & 07777;
4450 break;
4451 case Opt_uid:
4452 kuid = result.uid;
4453
4454 /*
4455 * The requested uid must be representable in the
4456 * filesystem's idmapping.
4457 */
4458 if (!kuid_has_mapping(fc->user_ns, kuid))
4459 goto bad_value;
4460
4461 ctx->uid = kuid;
4462 break;
4463 case Opt_gid:
4464 kgid = result.gid;
4465
4466 /*
4467 * The requested gid must be representable in the
4468 * filesystem's idmapping.
4469 */
4470 if (!kgid_has_mapping(fc->user_ns, kgid))
4471 goto bad_value;
4472
4473 ctx->gid = kgid;
4474 break;
4475 case Opt_huge:
4476 ctx->huge = result.uint_32;
4477 if (ctx->huge != SHMEM_HUGE_NEVER &&
4478 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4479 has_transparent_hugepage()))
4480 goto unsupported_parameter;
4481 ctx->seen |= SHMEM_SEEN_HUGE;
4482 break;
4483 case Opt_mpol:
4484 if (IS_ENABLED(CONFIG_NUMA)) {
4485 mpol_put(ctx->mpol);
4486 ctx->mpol = NULL;
4487 if (mpol_parse_str(param->string, &ctx->mpol))
4488 goto bad_value;
4489 break;
4490 }
4491 goto unsupported_parameter;
4492 case Opt_inode32:
4493 ctx->full_inums = false;
4494 ctx->seen |= SHMEM_SEEN_INUMS;
4495 break;
4496 case Opt_inode64:
4497 if (sizeof(ino_t) < 8) {
4498 return invalfc(fc,
4499 "Cannot use inode64 with <64bit inums in kernel\n");
4500 }
4501 ctx->full_inums = true;
4502 ctx->seen |= SHMEM_SEEN_INUMS;
4503 break;
4504 case Opt_noswap:
4505 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4506 return invalfc(fc,
4507 "Turning off swap in unprivileged tmpfs mounts unsupported");
4508 }
4509 ctx->noswap = true;
4510 ctx->seen |= SHMEM_SEEN_NOSWAP;
4511 break;
4512 case Opt_quota:
4513 if (fc->user_ns != &init_user_ns)
4514 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4515 ctx->seen |= SHMEM_SEEN_QUOTA;
4516 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4517 break;
4518 case Opt_usrquota:
4519 if (fc->user_ns != &init_user_ns)
4520 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4521 ctx->seen |= SHMEM_SEEN_QUOTA;
4522 ctx->quota_types |= QTYPE_MASK_USR;
4523 break;
4524 case Opt_grpquota:
4525 if (fc->user_ns != &init_user_ns)
4526 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4527 ctx->seen |= SHMEM_SEEN_QUOTA;
4528 ctx->quota_types |= QTYPE_MASK_GRP;
4529 break;
4530 case Opt_usrquota_block_hardlimit:
4531 size = memparse(param->string, &rest);
4532 if (*rest || !size)
4533 goto bad_value;
4534 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4535 return invalfc(fc,
4536 "User quota block hardlimit too large.");
4537 ctx->qlimits.usrquota_bhardlimit = size;
4538 break;
4539 case Opt_grpquota_block_hardlimit:
4540 size = memparse(param->string, &rest);
4541 if (*rest || !size)
4542 goto bad_value;
4543 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4544 return invalfc(fc,
4545 "Group quota block hardlimit too large.");
4546 ctx->qlimits.grpquota_bhardlimit = size;
4547 break;
4548 case Opt_usrquota_inode_hardlimit:
4549 size = memparse(param->string, &rest);
4550 if (*rest || !size)
4551 goto bad_value;
4552 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4553 return invalfc(fc,
4554 "User quota inode hardlimit too large.");
4555 ctx->qlimits.usrquota_ihardlimit = size;
4556 break;
4557 case Opt_grpquota_inode_hardlimit:
4558 size = memparse(param->string, &rest);
4559 if (*rest || !size)
4560 goto bad_value;
4561 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4562 return invalfc(fc,
4563 "Group quota inode hardlimit too large.");
4564 ctx->qlimits.grpquota_ihardlimit = size;
4565 break;
4566 case Opt_casefold_version:
4567 return shmem_parse_opt_casefold(fc, param, false);
4568 case Opt_casefold:
4569 return shmem_parse_opt_casefold(fc, param, true);
4570 case Opt_strict_encoding:
4571#if IS_ENABLED(CONFIG_UNICODE)
4572 ctx->strict_encoding = true;
4573 break;
4574#else
4575 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4576#endif
4577 }
4578 return 0;
4579
4580unsupported_parameter:
4581 return invalfc(fc, "Unsupported parameter '%s'", param->key);
4582bad_value:
4583 return invalfc(fc, "Bad value for '%s'", param->key);
4584}
4585
4586static int shmem_parse_options(struct fs_context *fc, void *data)
4587{
4588 char *options = data;
4589
4590 if (options) {
4591 int err = security_sb_eat_lsm_opts(options, &fc->security);
4592 if (err)
4593 return err;
4594 }
4595
4596 while (options != NULL) {
4597 char *this_char = options;
4598 for (;;) {
4599 /*
4600 * NUL-terminate this option: unfortunately,
4601 * mount options form a comma-separated list,
4602 * but mpol's nodelist may also contain commas.
4603 */
4604 options = strchr(options, ',');
4605 if (options == NULL)
4606 break;
4607 options++;
4608 if (!isdigit(*options)) {
4609 options[-1] = '\0';
4610 break;
4611 }
4612 }
4613 if (*this_char) {
4614 char *value = strchr(this_char, '=');
4615 size_t len = 0;
4616 int err;
4617
4618 if (value) {
4619 *value++ = '\0';
4620 len = strlen(value);
4621 }
4622 err = vfs_parse_fs_string(fc, this_char, value, len);
4623 if (err < 0)
4624 return err;
4625 }
4626 }
4627 return 0;
4628}
4629
4630/*
4631 * Reconfigure a shmem filesystem.
4632 */
4633static int shmem_reconfigure(struct fs_context *fc)
4634{
4635 struct shmem_options *ctx = fc->fs_private;
4636 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4637 unsigned long used_isp;
4638 struct mempolicy *mpol = NULL;
4639 const char *err;
4640
4641 raw_spin_lock(&sbinfo->stat_lock);
4642 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4643
4644 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4645 if (!sbinfo->max_blocks) {
4646 err = "Cannot retroactively limit size";
4647 goto out;
4648 }
4649 if (percpu_counter_compare(&sbinfo->used_blocks,
4650 ctx->blocks) > 0) {
4651 err = "Too small a size for current use";
4652 goto out;
4653 }
4654 }
4655 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4656 if (!sbinfo->max_inodes) {
4657 err = "Cannot retroactively limit inodes";
4658 goto out;
4659 }
4660 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4661 err = "Too few inodes for current use";
4662 goto out;
4663 }
4664 }
4665
4666 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4667 sbinfo->next_ino > UINT_MAX) {
4668 err = "Current inum too high to switch to 32-bit inums";
4669 goto out;
4670 }
4671 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4672 err = "Cannot disable swap on remount";
4673 goto out;
4674 }
4675 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4676 err = "Cannot enable swap on remount if it was disabled on first mount";
4677 goto out;
4678 }
4679
4680 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4681 !sb_any_quota_loaded(fc->root->d_sb)) {
4682 err = "Cannot enable quota on remount";
4683 goto out;
4684 }
4685
4686#ifdef CONFIG_TMPFS_QUOTA
4687#define CHANGED_LIMIT(name) \
4688 (ctx->qlimits.name## hardlimit && \
4689 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4690
4691 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4692 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4693 err = "Cannot change global quota limit on remount";
4694 goto out;
4695 }
4696#endif /* CONFIG_TMPFS_QUOTA */
4697
4698 if (ctx->seen & SHMEM_SEEN_HUGE)
4699 sbinfo->huge = ctx->huge;
4700 if (ctx->seen & SHMEM_SEEN_INUMS)
4701 sbinfo->full_inums = ctx->full_inums;
4702 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4703 sbinfo->max_blocks = ctx->blocks;
4704 if (ctx->seen & SHMEM_SEEN_INODES) {
4705 sbinfo->max_inodes = ctx->inodes;
4706 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4707 }
4708
4709 /*
4710 * Preserve previous mempolicy unless mpol remount option was specified.
4711 */
4712 if (ctx->mpol) {
4713 mpol = sbinfo->mpol;
4714 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4715 ctx->mpol = NULL;
4716 }
4717
4718 if (ctx->noswap)
4719 sbinfo->noswap = true;
4720
4721 raw_spin_unlock(&sbinfo->stat_lock);
4722 mpol_put(mpol);
4723 return 0;
4724out:
4725 raw_spin_unlock(&sbinfo->stat_lock);
4726 return invalfc(fc, "%s", err);
4727}
4728
4729static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4730{
4731 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4732 struct mempolicy *mpol;
4733
4734 if (sbinfo->max_blocks != shmem_default_max_blocks())
4735 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4736 if (sbinfo->max_inodes != shmem_default_max_inodes())
4737 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4738 if (sbinfo->mode != (0777 | S_ISVTX))
4739 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4740 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4741 seq_printf(seq, ",uid=%u",
4742 from_kuid_munged(&init_user_ns, sbinfo->uid));
4743 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4744 seq_printf(seq, ",gid=%u",
4745 from_kgid_munged(&init_user_ns, sbinfo->gid));
4746
4747 /*
4748 * Showing inode{64,32} might be useful even if it's the system default,
4749 * since then people don't have to resort to checking both here and
4750 * /proc/config.gz to confirm 64-bit inums were successfully applied
4751 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4752 *
4753 * We hide it when inode64 isn't the default and we are using 32-bit
4754 * inodes, since that probably just means the feature isn't even under
4755 * consideration.
4756 *
4757 * As such:
4758 *
4759 * +-----------------+-----------------+
4760 * | TMPFS_INODE64=y | TMPFS_INODE64=n |
4761 * +------------------+-----------------+-----------------+
4762 * | full_inums=true | show | show |
4763 * | full_inums=false | show | hide |
4764 * +------------------+-----------------+-----------------+
4765 *
4766 */
4767 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4768 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4769#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4770 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4771 if (sbinfo->huge)
4772 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4773#endif
4774 mpol = shmem_get_sbmpol(sbinfo);
4775 shmem_show_mpol(seq, mpol);
4776 mpol_put(mpol);
4777 if (sbinfo->noswap)
4778 seq_printf(seq, ",noswap");
4779#ifdef CONFIG_TMPFS_QUOTA
4780 if (sb_has_quota_active(root->d_sb, USRQUOTA))
4781 seq_printf(seq, ",usrquota");
4782 if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4783 seq_printf(seq, ",grpquota");
4784 if (sbinfo->qlimits.usrquota_bhardlimit)
4785 seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4786 sbinfo->qlimits.usrquota_bhardlimit);
4787 if (sbinfo->qlimits.grpquota_bhardlimit)
4788 seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4789 sbinfo->qlimits.grpquota_bhardlimit);
4790 if (sbinfo->qlimits.usrquota_ihardlimit)
4791 seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4792 sbinfo->qlimits.usrquota_ihardlimit);
4793 if (sbinfo->qlimits.grpquota_ihardlimit)
4794 seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4795 sbinfo->qlimits.grpquota_ihardlimit);
4796#endif
4797 return 0;
4798}
4799
4800#endif /* CONFIG_TMPFS */
4801
4802static void shmem_put_super(struct super_block *sb)
4803{
4804 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4805
4806#if IS_ENABLED(CONFIG_UNICODE)
4807 if (sb->s_encoding)
4808 utf8_unload(sb->s_encoding);
4809#endif
4810
4811#ifdef CONFIG_TMPFS_QUOTA
4812 shmem_disable_quotas(sb);
4813#endif
4814 free_percpu(sbinfo->ino_batch);
4815 percpu_counter_destroy(&sbinfo->used_blocks);
4816 mpol_put(sbinfo->mpol);
4817 kfree(sbinfo);
4818 sb->s_fs_info = NULL;
4819}
4820
4821#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_TMPFS)
4822static const struct dentry_operations shmem_ci_dentry_ops = {
4823 .d_hash = generic_ci_d_hash,
4824 .d_compare = generic_ci_d_compare,
4825 .d_delete = always_delete_dentry,
4826};
4827#endif
4828
4829static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4830{
4831 struct shmem_options *ctx = fc->fs_private;
4832 struct inode *inode;
4833 struct shmem_sb_info *sbinfo;
4834 int error = -ENOMEM;
4835
4836 /* Round up to L1_CACHE_BYTES to resist false sharing */
4837 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4838 L1_CACHE_BYTES), GFP_KERNEL);
4839 if (!sbinfo)
4840 return error;
4841
4842 sb->s_fs_info = sbinfo;
4843
4844#ifdef CONFIG_TMPFS
4845 /*
4846 * Per default we only allow half of the physical ram per
4847 * tmpfs instance, limiting inodes to one per page of lowmem;
4848 * but the internal instance is left unlimited.
4849 */
4850 if (!(sb->s_flags & SB_KERNMOUNT)) {
4851 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4852 ctx->blocks = shmem_default_max_blocks();
4853 if (!(ctx->seen & SHMEM_SEEN_INODES))
4854 ctx->inodes = shmem_default_max_inodes();
4855 if (!(ctx->seen & SHMEM_SEEN_INUMS))
4856 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4857 sbinfo->noswap = ctx->noswap;
4858 } else {
4859 sb->s_flags |= SB_NOUSER;
4860 }
4861 sb->s_export_op = &shmem_export_ops;
4862 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4863
4864#if IS_ENABLED(CONFIG_UNICODE)
4865 if (!ctx->encoding && ctx->strict_encoding) {
4866 pr_err("tmpfs: strict_encoding option without encoding is forbidden\n");
4867 error = -EINVAL;
4868 goto failed;
4869 }
4870
4871 if (ctx->encoding) {
4872 sb->s_encoding = ctx->encoding;
4873 sb->s_d_op = &shmem_ci_dentry_ops;
4874 if (ctx->strict_encoding)
4875 sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL;
4876 }
4877#endif
4878
4879#else
4880 sb->s_flags |= SB_NOUSER;
4881#endif /* CONFIG_TMPFS */
4882 sbinfo->max_blocks = ctx->blocks;
4883 sbinfo->max_inodes = ctx->inodes;
4884 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4885 if (sb->s_flags & SB_KERNMOUNT) {
4886 sbinfo->ino_batch = alloc_percpu(ino_t);
4887 if (!sbinfo->ino_batch)
4888 goto failed;
4889 }
4890 sbinfo->uid = ctx->uid;
4891 sbinfo->gid = ctx->gid;
4892 sbinfo->full_inums = ctx->full_inums;
4893 sbinfo->mode = ctx->mode;
4894 sbinfo->huge = ctx->huge;
4895 sbinfo->mpol = ctx->mpol;
4896 ctx->mpol = NULL;
4897
4898 raw_spin_lock_init(&sbinfo->stat_lock);
4899 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4900 goto failed;
4901 spin_lock_init(&sbinfo->shrinklist_lock);
4902 INIT_LIST_HEAD(&sbinfo->shrinklist);
4903
4904 sb->s_maxbytes = MAX_LFS_FILESIZE;
4905 sb->s_blocksize = PAGE_SIZE;
4906 sb->s_blocksize_bits = PAGE_SHIFT;
4907 sb->s_magic = TMPFS_MAGIC;
4908 sb->s_op = &shmem_ops;
4909 sb->s_time_gran = 1;
4910#ifdef CONFIG_TMPFS_XATTR
4911 sb->s_xattr = shmem_xattr_handlers;
4912#endif
4913#ifdef CONFIG_TMPFS_POSIX_ACL
4914 sb->s_flags |= SB_POSIXACL;
4915#endif
4916 uuid_t uuid;
4917 uuid_gen(&uuid);
4918 super_set_uuid(sb, uuid.b, sizeof(uuid));
4919
4920#ifdef CONFIG_TMPFS_QUOTA
4921 if (ctx->seen & SHMEM_SEEN_QUOTA) {
4922 sb->dq_op = &shmem_quota_operations;
4923 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4924 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4925
4926 /* Copy the default limits from ctx into sbinfo */
4927 memcpy(&sbinfo->qlimits, &ctx->qlimits,
4928 sizeof(struct shmem_quota_limits));
4929
4930 if (shmem_enable_quotas(sb, ctx->quota_types))
4931 goto failed;
4932 }
4933#endif /* CONFIG_TMPFS_QUOTA */
4934
4935 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4936 S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
4937 if (IS_ERR(inode)) {
4938 error = PTR_ERR(inode);
4939 goto failed;
4940 }
4941 inode->i_uid = sbinfo->uid;
4942 inode->i_gid = sbinfo->gid;
4943 sb->s_root = d_make_root(inode);
4944 if (!sb->s_root)
4945 goto failed;
4946 return 0;
4947
4948failed:
4949 shmem_put_super(sb);
4950 return error;
4951}
4952
4953static int shmem_get_tree(struct fs_context *fc)
4954{
4955 return get_tree_nodev(fc, shmem_fill_super);
4956}
4957
4958static void shmem_free_fc(struct fs_context *fc)
4959{
4960 struct shmem_options *ctx = fc->fs_private;
4961
4962 if (ctx) {
4963 mpol_put(ctx->mpol);
4964 kfree(ctx);
4965 }
4966}
4967
4968static const struct fs_context_operations shmem_fs_context_ops = {
4969 .free = shmem_free_fc,
4970 .get_tree = shmem_get_tree,
4971#ifdef CONFIG_TMPFS
4972 .parse_monolithic = shmem_parse_options,
4973 .parse_param = shmem_parse_one,
4974 .reconfigure = shmem_reconfigure,
4975#endif
4976};
4977
4978static struct kmem_cache *shmem_inode_cachep __ro_after_init;
4979
4980static struct inode *shmem_alloc_inode(struct super_block *sb)
4981{
4982 struct shmem_inode_info *info;
4983 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4984 if (!info)
4985 return NULL;
4986 return &info->vfs_inode;
4987}
4988
4989static void shmem_free_in_core_inode(struct inode *inode)
4990{
4991 if (S_ISLNK(inode->i_mode))
4992 kfree(inode->i_link);
4993 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4994}
4995
4996static void shmem_destroy_inode(struct inode *inode)
4997{
4998 if (S_ISREG(inode->i_mode))
4999 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
5000 if (S_ISDIR(inode->i_mode))
5001 simple_offset_destroy(shmem_get_offset_ctx(inode));
5002}
5003
5004static void shmem_init_inode(void *foo)
5005{
5006 struct shmem_inode_info *info = foo;
5007 inode_init_once(&info->vfs_inode);
5008}
5009
5010static void __init shmem_init_inodecache(void)
5011{
5012 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
5013 sizeof(struct shmem_inode_info),
5014 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
5015}
5016
5017static void __init shmem_destroy_inodecache(void)
5018{
5019 kmem_cache_destroy(shmem_inode_cachep);
5020}
5021
5022/* Keep the page in page cache instead of truncating it */
5023static int shmem_error_remove_folio(struct address_space *mapping,
5024 struct folio *folio)
5025{
5026 return 0;
5027}
5028
5029static const struct address_space_operations shmem_aops = {
5030 .writepage = shmem_writepage,
5031 .dirty_folio = noop_dirty_folio,
5032#ifdef CONFIG_TMPFS
5033 .write_begin = shmem_write_begin,
5034 .write_end = shmem_write_end,
5035#endif
5036#ifdef CONFIG_MIGRATION
5037 .migrate_folio = migrate_folio,
5038#endif
5039 .error_remove_folio = shmem_error_remove_folio,
5040};
5041
5042static const struct file_operations shmem_file_operations = {
5043 .mmap = shmem_mmap,
5044 .open = shmem_file_open,
5045 .get_unmapped_area = shmem_get_unmapped_area,
5046#ifdef CONFIG_TMPFS
5047 .llseek = shmem_file_llseek,
5048 .read_iter = shmem_file_read_iter,
5049 .write_iter = shmem_file_write_iter,
5050 .fsync = noop_fsync,
5051 .splice_read = shmem_file_splice_read,
5052 .splice_write = iter_file_splice_write,
5053 .fallocate = shmem_fallocate,
5054#endif
5055};
5056
5057static const struct inode_operations shmem_inode_operations = {
5058 .getattr = shmem_getattr,
5059 .setattr = shmem_setattr,
5060#ifdef CONFIG_TMPFS_XATTR
5061 .listxattr = shmem_listxattr,
5062 .set_acl = simple_set_acl,
5063 .fileattr_get = shmem_fileattr_get,
5064 .fileattr_set = shmem_fileattr_set,
5065#endif
5066};
5067
5068static const struct inode_operations shmem_dir_inode_operations = {
5069#ifdef CONFIG_TMPFS
5070 .getattr = shmem_getattr,
5071 .create = shmem_create,
5072 .lookup = simple_lookup,
5073 .link = shmem_link,
5074 .unlink = shmem_unlink,
5075 .symlink = shmem_symlink,
5076 .mkdir = shmem_mkdir,
5077 .rmdir = shmem_rmdir,
5078 .mknod = shmem_mknod,
5079 .rename = shmem_rename2,
5080 .tmpfile = shmem_tmpfile,
5081 .get_offset_ctx = shmem_get_offset_ctx,
5082#endif
5083#ifdef CONFIG_TMPFS_XATTR
5084 .listxattr = shmem_listxattr,
5085 .fileattr_get = shmem_fileattr_get,
5086 .fileattr_set = shmem_fileattr_set,
5087#endif
5088#ifdef CONFIG_TMPFS_POSIX_ACL
5089 .setattr = shmem_setattr,
5090 .set_acl = simple_set_acl,
5091#endif
5092};
5093
5094static const struct inode_operations shmem_special_inode_operations = {
5095 .getattr = shmem_getattr,
5096#ifdef CONFIG_TMPFS_XATTR
5097 .listxattr = shmem_listxattr,
5098#endif
5099#ifdef CONFIG_TMPFS_POSIX_ACL
5100 .setattr = shmem_setattr,
5101 .set_acl = simple_set_acl,
5102#endif
5103};
5104
5105static const struct super_operations shmem_ops = {
5106 .alloc_inode = shmem_alloc_inode,
5107 .free_inode = shmem_free_in_core_inode,
5108 .destroy_inode = shmem_destroy_inode,
5109#ifdef CONFIG_TMPFS
5110 .statfs = shmem_statfs,
5111 .show_options = shmem_show_options,
5112#endif
5113#ifdef CONFIG_TMPFS_QUOTA
5114 .get_dquots = shmem_get_dquots,
5115#endif
5116 .evict_inode = shmem_evict_inode,
5117 .drop_inode = generic_delete_inode,
5118 .put_super = shmem_put_super,
5119#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5120 .nr_cached_objects = shmem_unused_huge_count,
5121 .free_cached_objects = shmem_unused_huge_scan,
5122#endif
5123};
5124
5125static const struct vm_operations_struct shmem_vm_ops = {
5126 .fault = shmem_fault,
5127 .map_pages = filemap_map_pages,
5128#ifdef CONFIG_NUMA
5129 .set_policy = shmem_set_policy,
5130 .get_policy = shmem_get_policy,
5131#endif
5132};
5133
5134static const struct vm_operations_struct shmem_anon_vm_ops = {
5135 .fault = shmem_fault,
5136 .map_pages = filemap_map_pages,
5137#ifdef CONFIG_NUMA
5138 .set_policy = shmem_set_policy,
5139 .get_policy = shmem_get_policy,
5140#endif
5141};
5142
5143int shmem_init_fs_context(struct fs_context *fc)
5144{
5145 struct shmem_options *ctx;
5146
5147 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
5148 if (!ctx)
5149 return -ENOMEM;
5150
5151 ctx->mode = 0777 | S_ISVTX;
5152 ctx->uid = current_fsuid();
5153 ctx->gid = current_fsgid();
5154
5155#if IS_ENABLED(CONFIG_UNICODE)
5156 ctx->encoding = NULL;
5157#endif
5158
5159 fc->fs_private = ctx;
5160 fc->ops = &shmem_fs_context_ops;
5161 return 0;
5162}
5163
5164static struct file_system_type shmem_fs_type = {
5165 .owner = THIS_MODULE,
5166 .name = "tmpfs",
5167 .init_fs_context = shmem_init_fs_context,
5168#ifdef CONFIG_TMPFS
5169 .parameters = shmem_fs_parameters,
5170#endif
5171 .kill_sb = kill_litter_super,
5172 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
5173};
5174
5175#if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5176
5177#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \
5178{ \
5179 .attr = { .name = __stringify(_name), .mode = _mode }, \
5180 .show = _show, \
5181 .store = _store, \
5182}
5183
5184#define TMPFS_ATTR_W(_name, _store) \
5185 static struct kobj_attribute tmpfs_attr_##_name = \
5186 __INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
5187
5188#define TMPFS_ATTR_RW(_name, _show, _store) \
5189 static struct kobj_attribute tmpfs_attr_##_name = \
5190 __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
5191
5192#define TMPFS_ATTR_RO(_name, _show) \
5193 static struct kobj_attribute tmpfs_attr_##_name = \
5194 __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
5195
5196#if IS_ENABLED(CONFIG_UNICODE)
5197static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,
5198 char *buf)
5199{
5200 return sysfs_emit(buf, "supported\n");
5201}
5202TMPFS_ATTR_RO(casefold, casefold_show);
5203#endif
5204
5205static struct attribute *tmpfs_attributes[] = {
5206#if IS_ENABLED(CONFIG_UNICODE)
5207 &tmpfs_attr_casefold.attr,
5208#endif
5209 NULL
5210};
5211
5212static const struct attribute_group tmpfs_attribute_group = {
5213 .attrs = tmpfs_attributes,
5214 .name = "features"
5215};
5216
5217static struct kobject *tmpfs_kobj;
5218
5219static int __init tmpfs_sysfs_init(void)
5220{
5221 int ret;
5222
5223 tmpfs_kobj = kobject_create_and_add("tmpfs", fs_kobj);
5224 if (!tmpfs_kobj)
5225 return -ENOMEM;
5226
5227 ret = sysfs_create_group(tmpfs_kobj, &tmpfs_attribute_group);
5228 if (ret)
5229 kobject_put(tmpfs_kobj);
5230
5231 return ret;
5232}
5233#endif /* CONFIG_SYSFS && CONFIG_TMPFS */
5234
5235void __init shmem_init(void)
5236{
5237 int error;
5238
5239 shmem_init_inodecache();
5240
5241#ifdef CONFIG_TMPFS_QUOTA
5242 register_quota_format(&shmem_quota_format);
5243#endif
5244
5245 error = register_filesystem(&shmem_fs_type);
5246 if (error) {
5247 pr_err("Could not register tmpfs\n");
5248 goto out2;
5249 }
5250
5251 shm_mnt = kern_mount(&shmem_fs_type);
5252 if (IS_ERR(shm_mnt)) {
5253 error = PTR_ERR(shm_mnt);
5254 pr_err("Could not kern_mount tmpfs\n");
5255 goto out1;
5256 }
5257
5258#if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5259 error = tmpfs_sysfs_init();
5260 if (error) {
5261 pr_err("Could not init tmpfs sysfs\n");
5262 goto out1;
5263 }
5264#endif
5265
5266#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5267 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5268 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5269 else
5270 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5271
5272 /*
5273 * Default to setting PMD-sized THP to inherit the global setting and
5274 * disable all other multi-size THPs.
5275 */
5276 if (!shmem_orders_configured)
5277 huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
5278#endif
5279 return;
5280
5281out1:
5282 unregister_filesystem(&shmem_fs_type);
5283out2:
5284#ifdef CONFIG_TMPFS_QUOTA
5285 unregister_quota_format(&shmem_quota_format);
5286#endif
5287 shmem_destroy_inodecache();
5288 shm_mnt = ERR_PTR(error);
5289}
5290
5291#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5292static ssize_t shmem_enabled_show(struct kobject *kobj,
5293 struct kobj_attribute *attr, char *buf)
5294{
5295 static const int values[] = {
5296 SHMEM_HUGE_ALWAYS,
5297 SHMEM_HUGE_WITHIN_SIZE,
5298 SHMEM_HUGE_ADVISE,
5299 SHMEM_HUGE_NEVER,
5300 SHMEM_HUGE_DENY,
5301 SHMEM_HUGE_FORCE,
5302 };
5303 int len = 0;
5304 int i;
5305
5306 for (i = 0; i < ARRAY_SIZE(values); i++) {
5307 len += sysfs_emit_at(buf, len,
5308 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5309 i ? " " : "", shmem_format_huge(values[i]));
5310 }
5311 len += sysfs_emit_at(buf, len, "\n");
5312
5313 return len;
5314}
5315
5316static ssize_t shmem_enabled_store(struct kobject *kobj,
5317 struct kobj_attribute *attr, const char *buf, size_t count)
5318{
5319 char tmp[16];
5320 int huge, err;
5321
5322 if (count + 1 > sizeof(tmp))
5323 return -EINVAL;
5324 memcpy(tmp, buf, count);
5325 tmp[count] = '\0';
5326 if (count && tmp[count - 1] == '\n')
5327 tmp[count - 1] = '\0';
5328
5329 huge = shmem_parse_huge(tmp);
5330 if (huge == -EINVAL)
5331 return huge;
5332
5333 shmem_huge = huge;
5334 if (shmem_huge > SHMEM_HUGE_DENY)
5335 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5336
5337 err = start_stop_khugepaged();
5338 return err ? err : count;
5339}
5340
5341struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5342static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5343
5344static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5345 struct kobj_attribute *attr, char *buf)
5346{
5347 int order = to_thpsize(kobj)->order;
5348 const char *output;
5349
5350 if (test_bit(order, &huge_shmem_orders_always))
5351 output = "[always] inherit within_size advise never";
5352 else if (test_bit(order, &huge_shmem_orders_inherit))
5353 output = "always [inherit] within_size advise never";
5354 else if (test_bit(order, &huge_shmem_orders_within_size))
5355 output = "always inherit [within_size] advise never";
5356 else if (test_bit(order, &huge_shmem_orders_madvise))
5357 output = "always inherit within_size [advise] never";
5358 else
5359 output = "always inherit within_size advise [never]";
5360
5361 return sysfs_emit(buf, "%s\n", output);
5362}
5363
5364static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5365 struct kobj_attribute *attr,
5366 const char *buf, size_t count)
5367{
5368 int order = to_thpsize(kobj)->order;
5369 ssize_t ret = count;
5370
5371 if (sysfs_streq(buf, "always")) {
5372 spin_lock(&huge_shmem_orders_lock);
5373 clear_bit(order, &huge_shmem_orders_inherit);
5374 clear_bit(order, &huge_shmem_orders_madvise);
5375 clear_bit(order, &huge_shmem_orders_within_size);
5376 set_bit(order, &huge_shmem_orders_always);
5377 spin_unlock(&huge_shmem_orders_lock);
5378 } else if (sysfs_streq(buf, "inherit")) {
5379 /* Do not override huge allocation policy with non-PMD sized mTHP */
5380 if (shmem_huge == SHMEM_HUGE_FORCE &&
5381 order != HPAGE_PMD_ORDER)
5382 return -EINVAL;
5383
5384 spin_lock(&huge_shmem_orders_lock);
5385 clear_bit(order, &huge_shmem_orders_always);
5386 clear_bit(order, &huge_shmem_orders_madvise);
5387 clear_bit(order, &huge_shmem_orders_within_size);
5388 set_bit(order, &huge_shmem_orders_inherit);
5389 spin_unlock(&huge_shmem_orders_lock);
5390 } else if (sysfs_streq(buf, "within_size")) {
5391 spin_lock(&huge_shmem_orders_lock);
5392 clear_bit(order, &huge_shmem_orders_always);
5393 clear_bit(order, &huge_shmem_orders_inherit);
5394 clear_bit(order, &huge_shmem_orders_madvise);
5395 set_bit(order, &huge_shmem_orders_within_size);
5396 spin_unlock(&huge_shmem_orders_lock);
5397 } else if (sysfs_streq(buf, "advise")) {
5398 spin_lock(&huge_shmem_orders_lock);
5399 clear_bit(order, &huge_shmem_orders_always);
5400 clear_bit(order, &huge_shmem_orders_inherit);
5401 clear_bit(order, &huge_shmem_orders_within_size);
5402 set_bit(order, &huge_shmem_orders_madvise);
5403 spin_unlock(&huge_shmem_orders_lock);
5404 } else if (sysfs_streq(buf, "never")) {
5405 spin_lock(&huge_shmem_orders_lock);
5406 clear_bit(order, &huge_shmem_orders_always);
5407 clear_bit(order, &huge_shmem_orders_inherit);
5408 clear_bit(order, &huge_shmem_orders_within_size);
5409 clear_bit(order, &huge_shmem_orders_madvise);
5410 spin_unlock(&huge_shmem_orders_lock);
5411 } else {
5412 ret = -EINVAL;
5413 }
5414
5415 if (ret > 0) {
5416 int err = start_stop_khugepaged();
5417
5418 if (err)
5419 ret = err;
5420 }
5421 return ret;
5422}
5423
5424struct kobj_attribute thpsize_shmem_enabled_attr =
5425 __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
5426#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5427
5428#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
5429
5430static int __init setup_transparent_hugepage_shmem(char *str)
5431{
5432 int huge;
5433
5434 huge = shmem_parse_huge(str);
5435 if (huge == -EINVAL) {
5436 pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n");
5437 return huge;
5438 }
5439
5440 shmem_huge = huge;
5441 return 1;
5442}
5443__setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
5444
5445static char str_dup[PAGE_SIZE] __initdata;
5446static int __init setup_thp_shmem(char *str)
5447{
5448 char *token, *range, *policy, *subtoken;
5449 unsigned long always, inherit, madvise, within_size;
5450 char *start_size, *end_size;
5451 int start, end, nr;
5452 char *p;
5453
5454 if (!str || strlen(str) + 1 > PAGE_SIZE)
5455 goto err;
5456 strscpy(str_dup, str);
5457
5458 always = huge_shmem_orders_always;
5459 inherit = huge_shmem_orders_inherit;
5460 madvise = huge_shmem_orders_madvise;
5461 within_size = huge_shmem_orders_within_size;
5462 p = str_dup;
5463 while ((token = strsep(&p, ";")) != NULL) {
5464 range = strsep(&token, ":");
5465 policy = token;
5466
5467 if (!policy)
5468 goto err;
5469
5470 while ((subtoken = strsep(&range, ",")) != NULL) {
5471 if (strchr(subtoken, '-')) {
5472 start_size = strsep(&subtoken, "-");
5473 end_size = subtoken;
5474
5475 start = get_order_from_str(start_size,
5476 THP_ORDERS_ALL_FILE_DEFAULT);
5477 end = get_order_from_str(end_size,
5478 THP_ORDERS_ALL_FILE_DEFAULT);
5479 } else {
5480 start_size = end_size = subtoken;
5481 start = end = get_order_from_str(subtoken,
5482 THP_ORDERS_ALL_FILE_DEFAULT);
5483 }
5484
5485 if (start == -EINVAL) {
5486 pr_err("invalid size %s in thp_shmem boot parameter\n",
5487 start_size);
5488 goto err;
5489 }
5490
5491 if (end == -EINVAL) {
5492 pr_err("invalid size %s in thp_shmem boot parameter\n",
5493 end_size);
5494 goto err;
5495 }
5496
5497 if (start < 0 || end < 0 || start > end)
5498 goto err;
5499
5500 nr = end - start + 1;
5501 if (!strcmp(policy, "always")) {
5502 bitmap_set(&always, start, nr);
5503 bitmap_clear(&inherit, start, nr);
5504 bitmap_clear(&madvise, start, nr);
5505 bitmap_clear(&within_size, start, nr);
5506 } else if (!strcmp(policy, "advise")) {
5507 bitmap_set(&madvise, start, nr);
5508 bitmap_clear(&inherit, start, nr);
5509 bitmap_clear(&always, start, nr);
5510 bitmap_clear(&within_size, start, nr);
5511 } else if (!strcmp(policy, "inherit")) {
5512 bitmap_set(&inherit, start, nr);
5513 bitmap_clear(&madvise, start, nr);
5514 bitmap_clear(&always, start, nr);
5515 bitmap_clear(&within_size, start, nr);
5516 } else if (!strcmp(policy, "within_size")) {
5517 bitmap_set(&within_size, start, nr);
5518 bitmap_clear(&inherit, start, nr);
5519 bitmap_clear(&madvise, start, nr);
5520 bitmap_clear(&always, start, nr);
5521 } else if (!strcmp(policy, "never")) {
5522 bitmap_clear(&inherit, start, nr);
5523 bitmap_clear(&madvise, start, nr);
5524 bitmap_clear(&always, start, nr);
5525 bitmap_clear(&within_size, start, nr);
5526 } else {
5527 pr_err("invalid policy %s in thp_shmem boot parameter\n", policy);
5528 goto err;
5529 }
5530 }
5531 }
5532
5533 huge_shmem_orders_always = always;
5534 huge_shmem_orders_madvise = madvise;
5535 huge_shmem_orders_inherit = inherit;
5536 huge_shmem_orders_within_size = within_size;
5537 shmem_orders_configured = true;
5538 return 1;
5539
5540err:
5541 pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str);
5542 return 0;
5543}
5544__setup("thp_shmem=", setup_thp_shmem);
5545
5546#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5547
5548#else /* !CONFIG_SHMEM */
5549
5550/*
5551 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5552 *
5553 * This is intended for small system where the benefits of the full
5554 * shmem code (swap-backed and resource-limited) are outweighed by
5555 * their complexity. On systems without swap this code should be
5556 * effectively equivalent, but much lighter weight.
5557 */
5558
5559static struct file_system_type shmem_fs_type = {
5560 .name = "tmpfs",
5561 .init_fs_context = ramfs_init_fs_context,
5562 .parameters = ramfs_fs_parameters,
5563 .kill_sb = ramfs_kill_sb,
5564 .fs_flags = FS_USERNS_MOUNT,
5565};
5566
5567void __init shmem_init(void)
5568{
5569 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5570
5571 shm_mnt = kern_mount(&shmem_fs_type);
5572 BUG_ON(IS_ERR(shm_mnt));
5573}
5574
5575int shmem_unuse(unsigned int type)
5576{
5577 return 0;
5578}
5579
5580int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5581{
5582 return 0;
5583}
5584
5585void shmem_unlock_mapping(struct address_space *mapping)
5586{
5587}
5588
5589#ifdef CONFIG_MMU
5590unsigned long shmem_get_unmapped_area(struct file *file,
5591 unsigned long addr, unsigned long len,
5592 unsigned long pgoff, unsigned long flags)
5593{
5594 return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
5595}
5596#endif
5597
5598void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5599{
5600 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5601}
5602EXPORT_SYMBOL_GPL(shmem_truncate_range);
5603
5604#define shmem_vm_ops generic_file_vm_ops
5605#define shmem_anon_vm_ops generic_file_vm_ops
5606#define shmem_file_operations ramfs_file_operations
5607#define shmem_acct_size(flags, size) 0
5608#define shmem_unacct_size(flags, size) do {} while (0)
5609
5610static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5611 struct super_block *sb, struct inode *dir,
5612 umode_t mode, dev_t dev, unsigned long flags)
5613{
5614 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5615 return inode ? inode : ERR_PTR(-ENOSPC);
5616}
5617
5618#endif /* CONFIG_SHMEM */
5619
5620/* common code */
5621
5622static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5623 loff_t size, unsigned long flags, unsigned int i_flags)
5624{
5625 struct inode *inode;
5626 struct file *res;
5627
5628 if (IS_ERR(mnt))
5629 return ERR_CAST(mnt);
5630
5631 if (size < 0 || size > MAX_LFS_FILESIZE)
5632 return ERR_PTR(-EINVAL);
5633
5634 if (shmem_acct_size(flags, size))
5635 return ERR_PTR(-ENOMEM);
5636
5637 if (is_idmapped_mnt(mnt))
5638 return ERR_PTR(-EINVAL);
5639
5640 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5641 S_IFREG | S_IRWXUGO, 0, flags);
5642 if (IS_ERR(inode)) {
5643 shmem_unacct_size(flags, size);
5644 return ERR_CAST(inode);
5645 }
5646 inode->i_flags |= i_flags;
5647 inode->i_size = size;
5648 clear_nlink(inode); /* It is unlinked */
5649 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5650 if (!IS_ERR(res))
5651 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5652 &shmem_file_operations);
5653 if (IS_ERR(res))
5654 iput(inode);
5655 return res;
5656}
5657
5658/**
5659 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5660 * kernel internal. There will be NO LSM permission checks against the
5661 * underlying inode. So users of this interface must do LSM checks at a
5662 * higher layer. The users are the big_key and shm implementations. LSM
5663 * checks are provided at the key or shm level rather than the inode.
5664 * @name: name for dentry (to be seen in /proc/<pid>/maps
5665 * @size: size to be set for the file
5666 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5667 */
5668struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
5669{
5670 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5671}
5672EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
5673
5674/**
5675 * shmem_file_setup - get an unlinked file living in tmpfs
5676 * @name: name for dentry (to be seen in /proc/<pid>/maps
5677 * @size: size to be set for the file
5678 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5679 */
5680struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
5681{
5682 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5683}
5684EXPORT_SYMBOL_GPL(shmem_file_setup);
5685
5686/**
5687 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5688 * @mnt: the tmpfs mount where the file will be created
5689 * @name: name for dentry (to be seen in /proc/<pid>/maps
5690 * @size: size to be set for the file
5691 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5692 */
5693struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5694 loff_t size, unsigned long flags)
5695{
5696 return __shmem_file_setup(mnt, name, size, flags, 0);
5697}
5698EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
5699
5700/**
5701 * shmem_zero_setup - setup a shared anonymous mapping
5702 * @vma: the vma to be mmapped is prepared by do_mmap
5703 */
5704int shmem_zero_setup(struct vm_area_struct *vma)
5705{
5706 struct file *file;
5707 loff_t size = vma->vm_end - vma->vm_start;
5708
5709 /*
5710 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5711 * between XFS directory reading and selinux: since this file is only
5712 * accessible to the user through its mapping, use S_PRIVATE flag to
5713 * bypass file security, in the same way as shmem_kernel_file_setup().
5714 */
5715 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
5716 if (IS_ERR(file))
5717 return PTR_ERR(file);
5718
5719 if (vma->vm_file)
5720 fput(vma->vm_file);
5721 vma->vm_file = file;
5722 vma->vm_ops = &shmem_anon_vm_ops;
5723
5724 return 0;
5725}
5726
5727/**
5728 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5729 * @mapping: the folio's address_space
5730 * @index: the folio index
5731 * @gfp: the page allocator flags to use if allocating
5732 *
5733 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5734 * with any new page allocations done using the specified allocation flags.
5735 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
5736 * suit tmpfs, since it may have pages in swapcache, and needs to find those
5737 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
5738 *
5739 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5740 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5741 */
5742struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5743 pgoff_t index, gfp_t gfp)
5744{
5745#ifdef CONFIG_SHMEM
5746 struct inode *inode = mapping->host;
5747 struct folio *folio;
5748 int error;
5749
5750 error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
5751 gfp, NULL, NULL);
5752 if (error)
5753 return ERR_PTR(error);
5754
5755 folio_unlock(folio);
5756 return folio;
5757#else
5758 /*
5759 * The tiny !SHMEM case uses ramfs without swap
5760 */
5761 return mapping_read_folio_gfp(mapping, index, gfp);
5762#endif
5763}
5764EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
5765
5766struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
5767 pgoff_t index, gfp_t gfp)
5768{
5769 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
5770 struct page *page;
5771
5772 if (IS_ERR(folio))
5773 return &folio->page;
5774
5775 page = folio_file_page(folio, index);
5776 if (PageHWPoison(page)) {
5777 folio_put(folio);
5778 return ERR_PTR(-EIO);
5779 }
5780
5781 return page;
5782}
5783EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/ramfs.h>
29#include <linux/pagemap.h>
30#include <linux/file.h>
31#include <linux/mm.h>
32#include <linux/random.h>
33#include <linux/sched/signal.h>
34#include <linux/export.h>
35#include <linux/swap.h>
36#include <linux/uio.h>
37#include <linux/khugepaged.h>
38#include <linux/hugetlb.h>
39#include <linux/frontswap.h>
40#include <linux/fs_parser.h>
41
42#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
43
44static struct vfsmount *shm_mnt;
45
46#ifdef CONFIG_SHMEM
47/*
48 * This virtual memory filesystem is heavily based on the ramfs. It
49 * extends ramfs by the ability to use swap and honor resource limits
50 * which makes it a completely usable filesystem.
51 */
52
53#include <linux/xattr.h>
54#include <linux/exportfs.h>
55#include <linux/posix_acl.h>
56#include <linux/posix_acl_xattr.h>
57#include <linux/mman.h>
58#include <linux/string.h>
59#include <linux/slab.h>
60#include <linux/backing-dev.h>
61#include <linux/shmem_fs.h>
62#include <linux/writeback.h>
63#include <linux/blkdev.h>
64#include <linux/pagevec.h>
65#include <linux/percpu_counter.h>
66#include <linux/falloc.h>
67#include <linux/splice.h>
68#include <linux/security.h>
69#include <linux/swapops.h>
70#include <linux/mempolicy.h>
71#include <linux/namei.h>
72#include <linux/ctype.h>
73#include <linux/migrate.h>
74#include <linux/highmem.h>
75#include <linux/seq_file.h>
76#include <linux/magic.h>
77#include <linux/syscalls.h>
78#include <linux/fcntl.h>
79#include <uapi/linux/memfd.h>
80#include <linux/userfaultfd_k.h>
81#include <linux/rmap.h>
82#include <linux/uuid.h>
83
84#include <linux/uaccess.h>
85#include <asm/pgtable.h>
86
87#include "internal.h"
88
89#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
90#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
91
92/* Pretend that each entry is of this size in directory's i_size */
93#define BOGO_DIRENT_SIZE 20
94
95/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
96#define SHORT_SYMLINK_LEN 128
97
98/*
99 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
100 * inode->i_private (with i_mutex making sure that it has only one user at
101 * a time): we would prefer not to enlarge the shmem inode just for that.
102 */
103struct shmem_falloc {
104 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
105 pgoff_t start; /* start of range currently being fallocated */
106 pgoff_t next; /* the next page offset to be fallocated */
107 pgoff_t nr_falloced; /* how many new pages have been fallocated */
108 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
109};
110
111struct shmem_options {
112 unsigned long long blocks;
113 unsigned long long inodes;
114 struct mempolicy *mpol;
115 kuid_t uid;
116 kgid_t gid;
117 umode_t mode;
118 int huge;
119 int seen;
120#define SHMEM_SEEN_BLOCKS 1
121#define SHMEM_SEEN_INODES 2
122#define SHMEM_SEEN_HUGE 4
123};
124
125#ifdef CONFIG_TMPFS
126static unsigned long shmem_default_max_blocks(void)
127{
128 return totalram_pages() / 2;
129}
130
131static unsigned long shmem_default_max_inodes(void)
132{
133 unsigned long nr_pages = totalram_pages();
134
135 return min(nr_pages - totalhigh_pages(), nr_pages / 2);
136}
137#endif
138
139static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
140static int shmem_replace_page(struct page **pagep, gfp_t gfp,
141 struct shmem_inode_info *info, pgoff_t index);
142static int shmem_swapin_page(struct inode *inode, pgoff_t index,
143 struct page **pagep, enum sgp_type sgp,
144 gfp_t gfp, struct vm_area_struct *vma,
145 vm_fault_t *fault_type);
146static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
147 struct page **pagep, enum sgp_type sgp,
148 gfp_t gfp, struct vm_area_struct *vma,
149 struct vm_fault *vmf, vm_fault_t *fault_type);
150
151int shmem_getpage(struct inode *inode, pgoff_t index,
152 struct page **pagep, enum sgp_type sgp)
153{
154 return shmem_getpage_gfp(inode, index, pagep, sgp,
155 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
156}
157
158static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
159{
160 return sb->s_fs_info;
161}
162
163/*
164 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
165 * for shared memory and for shared anonymous (/dev/zero) mappings
166 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
167 * consistent with the pre-accounting of private mappings ...
168 */
169static inline int shmem_acct_size(unsigned long flags, loff_t size)
170{
171 return (flags & VM_NORESERVE) ?
172 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
173}
174
175static inline void shmem_unacct_size(unsigned long flags, loff_t size)
176{
177 if (!(flags & VM_NORESERVE))
178 vm_unacct_memory(VM_ACCT(size));
179}
180
181static inline int shmem_reacct_size(unsigned long flags,
182 loff_t oldsize, loff_t newsize)
183{
184 if (!(flags & VM_NORESERVE)) {
185 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
186 return security_vm_enough_memory_mm(current->mm,
187 VM_ACCT(newsize) - VM_ACCT(oldsize));
188 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
189 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
190 }
191 return 0;
192}
193
194/*
195 * ... whereas tmpfs objects are accounted incrementally as
196 * pages are allocated, in order to allow large sparse files.
197 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
198 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
199 */
200static inline int shmem_acct_block(unsigned long flags, long pages)
201{
202 if (!(flags & VM_NORESERVE))
203 return 0;
204
205 return security_vm_enough_memory_mm(current->mm,
206 pages * VM_ACCT(PAGE_SIZE));
207}
208
209static inline void shmem_unacct_blocks(unsigned long flags, long pages)
210{
211 if (flags & VM_NORESERVE)
212 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
213}
214
215static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
216{
217 struct shmem_inode_info *info = SHMEM_I(inode);
218 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
219
220 if (shmem_acct_block(info->flags, pages))
221 return false;
222
223 if (sbinfo->max_blocks) {
224 if (percpu_counter_compare(&sbinfo->used_blocks,
225 sbinfo->max_blocks - pages) > 0)
226 goto unacct;
227 percpu_counter_add(&sbinfo->used_blocks, pages);
228 }
229
230 return true;
231
232unacct:
233 shmem_unacct_blocks(info->flags, pages);
234 return false;
235}
236
237static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
238{
239 struct shmem_inode_info *info = SHMEM_I(inode);
240 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
241
242 if (sbinfo->max_blocks)
243 percpu_counter_sub(&sbinfo->used_blocks, pages);
244 shmem_unacct_blocks(info->flags, pages);
245}
246
247static const struct super_operations shmem_ops;
248static const struct address_space_operations shmem_aops;
249static const struct file_operations shmem_file_operations;
250static const struct inode_operations shmem_inode_operations;
251static const struct inode_operations shmem_dir_inode_operations;
252static const struct inode_operations shmem_special_inode_operations;
253static const struct vm_operations_struct shmem_vm_ops;
254static struct file_system_type shmem_fs_type;
255
256bool vma_is_shmem(struct vm_area_struct *vma)
257{
258 return vma->vm_ops == &shmem_vm_ops;
259}
260
261static LIST_HEAD(shmem_swaplist);
262static DEFINE_MUTEX(shmem_swaplist_mutex);
263
264static int shmem_reserve_inode(struct super_block *sb)
265{
266 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
267 if (sbinfo->max_inodes) {
268 spin_lock(&sbinfo->stat_lock);
269 if (!sbinfo->free_inodes) {
270 spin_unlock(&sbinfo->stat_lock);
271 return -ENOSPC;
272 }
273 sbinfo->free_inodes--;
274 spin_unlock(&sbinfo->stat_lock);
275 }
276 return 0;
277}
278
279static void shmem_free_inode(struct super_block *sb)
280{
281 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
282 if (sbinfo->max_inodes) {
283 spin_lock(&sbinfo->stat_lock);
284 sbinfo->free_inodes++;
285 spin_unlock(&sbinfo->stat_lock);
286 }
287}
288
289/**
290 * shmem_recalc_inode - recalculate the block usage of an inode
291 * @inode: inode to recalc
292 *
293 * We have to calculate the free blocks since the mm can drop
294 * undirtied hole pages behind our back.
295 *
296 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
297 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
298 *
299 * It has to be called with the spinlock held.
300 */
301static void shmem_recalc_inode(struct inode *inode)
302{
303 struct shmem_inode_info *info = SHMEM_I(inode);
304 long freed;
305
306 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
307 if (freed > 0) {
308 info->alloced -= freed;
309 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
310 shmem_inode_unacct_blocks(inode, freed);
311 }
312}
313
314bool shmem_charge(struct inode *inode, long pages)
315{
316 struct shmem_inode_info *info = SHMEM_I(inode);
317 unsigned long flags;
318
319 if (!shmem_inode_acct_block(inode, pages))
320 return false;
321
322 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
323 inode->i_mapping->nrpages += pages;
324
325 spin_lock_irqsave(&info->lock, flags);
326 info->alloced += pages;
327 inode->i_blocks += pages * BLOCKS_PER_PAGE;
328 shmem_recalc_inode(inode);
329 spin_unlock_irqrestore(&info->lock, flags);
330
331 return true;
332}
333
334void shmem_uncharge(struct inode *inode, long pages)
335{
336 struct shmem_inode_info *info = SHMEM_I(inode);
337 unsigned long flags;
338
339 /* nrpages adjustment done by __delete_from_page_cache() or caller */
340
341 spin_lock_irqsave(&info->lock, flags);
342 info->alloced -= pages;
343 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
344 shmem_recalc_inode(inode);
345 spin_unlock_irqrestore(&info->lock, flags);
346
347 shmem_inode_unacct_blocks(inode, pages);
348}
349
350/*
351 * Replace item expected in xarray by a new item, while holding xa_lock.
352 */
353static int shmem_replace_entry(struct address_space *mapping,
354 pgoff_t index, void *expected, void *replacement)
355{
356 XA_STATE(xas, &mapping->i_pages, index);
357 void *item;
358
359 VM_BUG_ON(!expected);
360 VM_BUG_ON(!replacement);
361 item = xas_load(&xas);
362 if (item != expected)
363 return -ENOENT;
364 xas_store(&xas, replacement);
365 return 0;
366}
367
368/*
369 * Sometimes, before we decide whether to proceed or to fail, we must check
370 * that an entry was not already brought back from swap by a racing thread.
371 *
372 * Checking page is not enough: by the time a SwapCache page is locked, it
373 * might be reused, and again be SwapCache, using the same swap as before.
374 */
375static bool shmem_confirm_swap(struct address_space *mapping,
376 pgoff_t index, swp_entry_t swap)
377{
378 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
379}
380
381/*
382 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
383 *
384 * SHMEM_HUGE_NEVER:
385 * disables huge pages for the mount;
386 * SHMEM_HUGE_ALWAYS:
387 * enables huge pages for the mount;
388 * SHMEM_HUGE_WITHIN_SIZE:
389 * only allocate huge pages if the page will be fully within i_size,
390 * also respect fadvise()/madvise() hints;
391 * SHMEM_HUGE_ADVISE:
392 * only allocate huge pages if requested with fadvise()/madvise();
393 */
394
395#define SHMEM_HUGE_NEVER 0
396#define SHMEM_HUGE_ALWAYS 1
397#define SHMEM_HUGE_WITHIN_SIZE 2
398#define SHMEM_HUGE_ADVISE 3
399
400/*
401 * Special values.
402 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
403 *
404 * SHMEM_HUGE_DENY:
405 * disables huge on shm_mnt and all mounts, for emergency use;
406 * SHMEM_HUGE_FORCE:
407 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
408 *
409 */
410#define SHMEM_HUGE_DENY (-1)
411#define SHMEM_HUGE_FORCE (-2)
412
413#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
414/* ifdef here to avoid bloating shmem.o when not necessary */
415
416static int shmem_huge __read_mostly;
417
418#if defined(CONFIG_SYSFS)
419static int shmem_parse_huge(const char *str)
420{
421 if (!strcmp(str, "never"))
422 return SHMEM_HUGE_NEVER;
423 if (!strcmp(str, "always"))
424 return SHMEM_HUGE_ALWAYS;
425 if (!strcmp(str, "within_size"))
426 return SHMEM_HUGE_WITHIN_SIZE;
427 if (!strcmp(str, "advise"))
428 return SHMEM_HUGE_ADVISE;
429 if (!strcmp(str, "deny"))
430 return SHMEM_HUGE_DENY;
431 if (!strcmp(str, "force"))
432 return SHMEM_HUGE_FORCE;
433 return -EINVAL;
434}
435#endif
436
437#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
438static const char *shmem_format_huge(int huge)
439{
440 switch (huge) {
441 case SHMEM_HUGE_NEVER:
442 return "never";
443 case SHMEM_HUGE_ALWAYS:
444 return "always";
445 case SHMEM_HUGE_WITHIN_SIZE:
446 return "within_size";
447 case SHMEM_HUGE_ADVISE:
448 return "advise";
449 case SHMEM_HUGE_DENY:
450 return "deny";
451 case SHMEM_HUGE_FORCE:
452 return "force";
453 default:
454 VM_BUG_ON(1);
455 return "bad_val";
456 }
457}
458#endif
459
460static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
461 struct shrink_control *sc, unsigned long nr_to_split)
462{
463 LIST_HEAD(list), *pos, *next;
464 LIST_HEAD(to_remove);
465 struct inode *inode;
466 struct shmem_inode_info *info;
467 struct page *page;
468 unsigned long batch = sc ? sc->nr_to_scan : 128;
469 int removed = 0, split = 0;
470
471 if (list_empty(&sbinfo->shrinklist))
472 return SHRINK_STOP;
473
474 spin_lock(&sbinfo->shrinklist_lock);
475 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
476 info = list_entry(pos, struct shmem_inode_info, shrinklist);
477
478 /* pin the inode */
479 inode = igrab(&info->vfs_inode);
480
481 /* inode is about to be evicted */
482 if (!inode) {
483 list_del_init(&info->shrinklist);
484 removed++;
485 goto next;
486 }
487
488 /* Check if there's anything to gain */
489 if (round_up(inode->i_size, PAGE_SIZE) ==
490 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
491 list_move(&info->shrinklist, &to_remove);
492 removed++;
493 goto next;
494 }
495
496 list_move(&info->shrinklist, &list);
497next:
498 if (!--batch)
499 break;
500 }
501 spin_unlock(&sbinfo->shrinklist_lock);
502
503 list_for_each_safe(pos, next, &to_remove) {
504 info = list_entry(pos, struct shmem_inode_info, shrinklist);
505 inode = &info->vfs_inode;
506 list_del_init(&info->shrinklist);
507 iput(inode);
508 }
509
510 list_for_each_safe(pos, next, &list) {
511 int ret;
512
513 info = list_entry(pos, struct shmem_inode_info, shrinklist);
514 inode = &info->vfs_inode;
515
516 if (nr_to_split && split >= nr_to_split)
517 goto leave;
518
519 page = find_get_page(inode->i_mapping,
520 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
521 if (!page)
522 goto drop;
523
524 /* No huge page at the end of the file: nothing to split */
525 if (!PageTransHuge(page)) {
526 put_page(page);
527 goto drop;
528 }
529
530 /*
531 * Leave the inode on the list if we failed to lock
532 * the page at this time.
533 *
534 * Waiting for the lock may lead to deadlock in the
535 * reclaim path.
536 */
537 if (!trylock_page(page)) {
538 put_page(page);
539 goto leave;
540 }
541
542 ret = split_huge_page(page);
543 unlock_page(page);
544 put_page(page);
545
546 /* If split failed leave the inode on the list */
547 if (ret)
548 goto leave;
549
550 split++;
551drop:
552 list_del_init(&info->shrinklist);
553 removed++;
554leave:
555 iput(inode);
556 }
557
558 spin_lock(&sbinfo->shrinklist_lock);
559 list_splice_tail(&list, &sbinfo->shrinklist);
560 sbinfo->shrinklist_len -= removed;
561 spin_unlock(&sbinfo->shrinklist_lock);
562
563 return split;
564}
565
566static long shmem_unused_huge_scan(struct super_block *sb,
567 struct shrink_control *sc)
568{
569 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
570
571 if (!READ_ONCE(sbinfo->shrinklist_len))
572 return SHRINK_STOP;
573
574 return shmem_unused_huge_shrink(sbinfo, sc, 0);
575}
576
577static long shmem_unused_huge_count(struct super_block *sb,
578 struct shrink_control *sc)
579{
580 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
581 return READ_ONCE(sbinfo->shrinklist_len);
582}
583#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
584
585#define shmem_huge SHMEM_HUGE_DENY
586
587static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
588 struct shrink_control *sc, unsigned long nr_to_split)
589{
590 return 0;
591}
592#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
593
594static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
595{
596 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
597 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
598 shmem_huge != SHMEM_HUGE_DENY)
599 return true;
600 return false;
601}
602
603/*
604 * Like add_to_page_cache_locked, but error if expected item has gone.
605 */
606static int shmem_add_to_page_cache(struct page *page,
607 struct address_space *mapping,
608 pgoff_t index, void *expected, gfp_t gfp)
609{
610 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
611 unsigned long i = 0;
612 unsigned long nr = compound_nr(page);
613
614 VM_BUG_ON_PAGE(PageTail(page), page);
615 VM_BUG_ON_PAGE(index != round_down(index, nr), page);
616 VM_BUG_ON_PAGE(!PageLocked(page), page);
617 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
618 VM_BUG_ON(expected && PageTransHuge(page));
619
620 page_ref_add(page, nr);
621 page->mapping = mapping;
622 page->index = index;
623
624 do {
625 void *entry;
626 xas_lock_irq(&xas);
627 entry = xas_find_conflict(&xas);
628 if (entry != expected)
629 xas_set_err(&xas, -EEXIST);
630 xas_create_range(&xas);
631 if (xas_error(&xas))
632 goto unlock;
633next:
634 xas_store(&xas, page);
635 if (++i < nr) {
636 xas_next(&xas);
637 goto next;
638 }
639 if (PageTransHuge(page)) {
640 count_vm_event(THP_FILE_ALLOC);
641 __inc_node_page_state(page, NR_SHMEM_THPS);
642 }
643 mapping->nrpages += nr;
644 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
645 __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
646unlock:
647 xas_unlock_irq(&xas);
648 } while (xas_nomem(&xas, gfp));
649
650 if (xas_error(&xas)) {
651 page->mapping = NULL;
652 page_ref_sub(page, nr);
653 return xas_error(&xas);
654 }
655
656 return 0;
657}
658
659/*
660 * Like delete_from_page_cache, but substitutes swap for page.
661 */
662static void shmem_delete_from_page_cache(struct page *page, void *radswap)
663{
664 struct address_space *mapping = page->mapping;
665 int error;
666
667 VM_BUG_ON_PAGE(PageCompound(page), page);
668
669 xa_lock_irq(&mapping->i_pages);
670 error = shmem_replace_entry(mapping, page->index, page, radswap);
671 page->mapping = NULL;
672 mapping->nrpages--;
673 __dec_node_page_state(page, NR_FILE_PAGES);
674 __dec_node_page_state(page, NR_SHMEM);
675 xa_unlock_irq(&mapping->i_pages);
676 put_page(page);
677 BUG_ON(error);
678}
679
680/*
681 * Remove swap entry from page cache, free the swap and its page cache.
682 */
683static int shmem_free_swap(struct address_space *mapping,
684 pgoff_t index, void *radswap)
685{
686 void *old;
687
688 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
689 if (old != radswap)
690 return -ENOENT;
691 free_swap_and_cache(radix_to_swp_entry(radswap));
692 return 0;
693}
694
695/*
696 * Determine (in bytes) how many of the shmem object's pages mapped by the
697 * given offsets are swapped out.
698 *
699 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
700 * as long as the inode doesn't go away and racy results are not a problem.
701 */
702unsigned long shmem_partial_swap_usage(struct address_space *mapping,
703 pgoff_t start, pgoff_t end)
704{
705 XA_STATE(xas, &mapping->i_pages, start);
706 struct page *page;
707 unsigned long swapped = 0;
708
709 rcu_read_lock();
710 xas_for_each(&xas, page, end - 1) {
711 if (xas_retry(&xas, page))
712 continue;
713 if (xa_is_value(page))
714 swapped++;
715
716 if (need_resched()) {
717 xas_pause(&xas);
718 cond_resched_rcu();
719 }
720 }
721
722 rcu_read_unlock();
723
724 return swapped << PAGE_SHIFT;
725}
726
727/*
728 * Determine (in bytes) how many of the shmem object's pages mapped by the
729 * given vma is swapped out.
730 *
731 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
732 * as long as the inode doesn't go away and racy results are not a problem.
733 */
734unsigned long shmem_swap_usage(struct vm_area_struct *vma)
735{
736 struct inode *inode = file_inode(vma->vm_file);
737 struct shmem_inode_info *info = SHMEM_I(inode);
738 struct address_space *mapping = inode->i_mapping;
739 unsigned long swapped;
740
741 /* Be careful as we don't hold info->lock */
742 swapped = READ_ONCE(info->swapped);
743
744 /*
745 * The easier cases are when the shmem object has nothing in swap, or
746 * the vma maps it whole. Then we can simply use the stats that we
747 * already track.
748 */
749 if (!swapped)
750 return 0;
751
752 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
753 return swapped << PAGE_SHIFT;
754
755 /* Here comes the more involved part */
756 return shmem_partial_swap_usage(mapping,
757 linear_page_index(vma, vma->vm_start),
758 linear_page_index(vma, vma->vm_end));
759}
760
761/*
762 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
763 */
764void shmem_unlock_mapping(struct address_space *mapping)
765{
766 struct pagevec pvec;
767 pgoff_t indices[PAGEVEC_SIZE];
768 pgoff_t index = 0;
769
770 pagevec_init(&pvec);
771 /*
772 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
773 */
774 while (!mapping_unevictable(mapping)) {
775 /*
776 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
777 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
778 */
779 pvec.nr = find_get_entries(mapping, index,
780 PAGEVEC_SIZE, pvec.pages, indices);
781 if (!pvec.nr)
782 break;
783 index = indices[pvec.nr - 1] + 1;
784 pagevec_remove_exceptionals(&pvec);
785 check_move_unevictable_pages(&pvec);
786 pagevec_release(&pvec);
787 cond_resched();
788 }
789}
790
791/*
792 * Remove range of pages and swap entries from page cache, and free them.
793 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
794 */
795static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
796 bool unfalloc)
797{
798 struct address_space *mapping = inode->i_mapping;
799 struct shmem_inode_info *info = SHMEM_I(inode);
800 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
801 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
802 unsigned int partial_start = lstart & (PAGE_SIZE - 1);
803 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
804 struct pagevec pvec;
805 pgoff_t indices[PAGEVEC_SIZE];
806 long nr_swaps_freed = 0;
807 pgoff_t index;
808 int i;
809
810 if (lend == -1)
811 end = -1; /* unsigned, so actually very big */
812
813 pagevec_init(&pvec);
814 index = start;
815 while (index < end) {
816 pvec.nr = find_get_entries(mapping, index,
817 min(end - index, (pgoff_t)PAGEVEC_SIZE),
818 pvec.pages, indices);
819 if (!pvec.nr)
820 break;
821 for (i = 0; i < pagevec_count(&pvec); i++) {
822 struct page *page = pvec.pages[i];
823
824 index = indices[i];
825 if (index >= end)
826 break;
827
828 if (xa_is_value(page)) {
829 if (unfalloc)
830 continue;
831 nr_swaps_freed += !shmem_free_swap(mapping,
832 index, page);
833 continue;
834 }
835
836 VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
837
838 if (!trylock_page(page))
839 continue;
840
841 if (PageTransTail(page)) {
842 /* Middle of THP: zero out the page */
843 clear_highpage(page);
844 unlock_page(page);
845 continue;
846 } else if (PageTransHuge(page)) {
847 if (index == round_down(end, HPAGE_PMD_NR)) {
848 /*
849 * Range ends in the middle of THP:
850 * zero out the page
851 */
852 clear_highpage(page);
853 unlock_page(page);
854 continue;
855 }
856 index += HPAGE_PMD_NR - 1;
857 i += HPAGE_PMD_NR - 1;
858 }
859
860 if (!unfalloc || !PageUptodate(page)) {
861 VM_BUG_ON_PAGE(PageTail(page), page);
862 if (page_mapping(page) == mapping) {
863 VM_BUG_ON_PAGE(PageWriteback(page), page);
864 truncate_inode_page(mapping, page);
865 }
866 }
867 unlock_page(page);
868 }
869 pagevec_remove_exceptionals(&pvec);
870 pagevec_release(&pvec);
871 cond_resched();
872 index++;
873 }
874
875 if (partial_start) {
876 struct page *page = NULL;
877 shmem_getpage(inode, start - 1, &page, SGP_READ);
878 if (page) {
879 unsigned int top = PAGE_SIZE;
880 if (start > end) {
881 top = partial_end;
882 partial_end = 0;
883 }
884 zero_user_segment(page, partial_start, top);
885 set_page_dirty(page);
886 unlock_page(page);
887 put_page(page);
888 }
889 }
890 if (partial_end) {
891 struct page *page = NULL;
892 shmem_getpage(inode, end, &page, SGP_READ);
893 if (page) {
894 zero_user_segment(page, 0, partial_end);
895 set_page_dirty(page);
896 unlock_page(page);
897 put_page(page);
898 }
899 }
900 if (start >= end)
901 return;
902
903 index = start;
904 while (index < end) {
905 cond_resched();
906
907 pvec.nr = find_get_entries(mapping, index,
908 min(end - index, (pgoff_t)PAGEVEC_SIZE),
909 pvec.pages, indices);
910 if (!pvec.nr) {
911 /* If all gone or hole-punch or unfalloc, we're done */
912 if (index == start || end != -1)
913 break;
914 /* But if truncating, restart to make sure all gone */
915 index = start;
916 continue;
917 }
918 for (i = 0; i < pagevec_count(&pvec); i++) {
919 struct page *page = pvec.pages[i];
920
921 index = indices[i];
922 if (index >= end)
923 break;
924
925 if (xa_is_value(page)) {
926 if (unfalloc)
927 continue;
928 if (shmem_free_swap(mapping, index, page)) {
929 /* Swap was replaced by page: retry */
930 index--;
931 break;
932 }
933 nr_swaps_freed++;
934 continue;
935 }
936
937 lock_page(page);
938
939 if (PageTransTail(page)) {
940 /* Middle of THP: zero out the page */
941 clear_highpage(page);
942 unlock_page(page);
943 /*
944 * Partial thp truncate due 'start' in middle
945 * of THP: don't need to look on these pages
946 * again on !pvec.nr restart.
947 */
948 if (index != round_down(end, HPAGE_PMD_NR))
949 start++;
950 continue;
951 } else if (PageTransHuge(page)) {
952 if (index == round_down(end, HPAGE_PMD_NR)) {
953 /*
954 * Range ends in the middle of THP:
955 * zero out the page
956 */
957 clear_highpage(page);
958 unlock_page(page);
959 continue;
960 }
961 index += HPAGE_PMD_NR - 1;
962 i += HPAGE_PMD_NR - 1;
963 }
964
965 if (!unfalloc || !PageUptodate(page)) {
966 VM_BUG_ON_PAGE(PageTail(page), page);
967 if (page_mapping(page) == mapping) {
968 VM_BUG_ON_PAGE(PageWriteback(page), page);
969 truncate_inode_page(mapping, page);
970 } else {
971 /* Page was replaced by swap: retry */
972 unlock_page(page);
973 index--;
974 break;
975 }
976 }
977 unlock_page(page);
978 }
979 pagevec_remove_exceptionals(&pvec);
980 pagevec_release(&pvec);
981 index++;
982 }
983
984 spin_lock_irq(&info->lock);
985 info->swapped -= nr_swaps_freed;
986 shmem_recalc_inode(inode);
987 spin_unlock_irq(&info->lock);
988}
989
990void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
991{
992 shmem_undo_range(inode, lstart, lend, false);
993 inode->i_ctime = inode->i_mtime = current_time(inode);
994}
995EXPORT_SYMBOL_GPL(shmem_truncate_range);
996
997static int shmem_getattr(const struct path *path, struct kstat *stat,
998 u32 request_mask, unsigned int query_flags)
999{
1000 struct inode *inode = path->dentry->d_inode;
1001 struct shmem_inode_info *info = SHMEM_I(inode);
1002 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1003
1004 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1005 spin_lock_irq(&info->lock);
1006 shmem_recalc_inode(inode);
1007 spin_unlock_irq(&info->lock);
1008 }
1009 generic_fillattr(inode, stat);
1010
1011 if (is_huge_enabled(sb_info))
1012 stat->blksize = HPAGE_PMD_SIZE;
1013
1014 return 0;
1015}
1016
1017static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1018{
1019 struct inode *inode = d_inode(dentry);
1020 struct shmem_inode_info *info = SHMEM_I(inode);
1021 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1022 int error;
1023
1024 error = setattr_prepare(dentry, attr);
1025 if (error)
1026 return error;
1027
1028 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1029 loff_t oldsize = inode->i_size;
1030 loff_t newsize = attr->ia_size;
1031
1032 /* protected by i_mutex */
1033 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1034 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1035 return -EPERM;
1036
1037 if (newsize != oldsize) {
1038 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1039 oldsize, newsize);
1040 if (error)
1041 return error;
1042 i_size_write(inode, newsize);
1043 inode->i_ctime = inode->i_mtime = current_time(inode);
1044 }
1045 if (newsize <= oldsize) {
1046 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1047 if (oldsize > holebegin)
1048 unmap_mapping_range(inode->i_mapping,
1049 holebegin, 0, 1);
1050 if (info->alloced)
1051 shmem_truncate_range(inode,
1052 newsize, (loff_t)-1);
1053 /* unmap again to remove racily COWed private pages */
1054 if (oldsize > holebegin)
1055 unmap_mapping_range(inode->i_mapping,
1056 holebegin, 0, 1);
1057
1058 /*
1059 * Part of the huge page can be beyond i_size: subject
1060 * to shrink under memory pressure.
1061 */
1062 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1063 spin_lock(&sbinfo->shrinklist_lock);
1064 /*
1065 * _careful to defend against unlocked access to
1066 * ->shrink_list in shmem_unused_huge_shrink()
1067 */
1068 if (list_empty_careful(&info->shrinklist)) {
1069 list_add_tail(&info->shrinklist,
1070 &sbinfo->shrinklist);
1071 sbinfo->shrinklist_len++;
1072 }
1073 spin_unlock(&sbinfo->shrinklist_lock);
1074 }
1075 }
1076 }
1077
1078 setattr_copy(inode, attr);
1079 if (attr->ia_valid & ATTR_MODE)
1080 error = posix_acl_chmod(inode, inode->i_mode);
1081 return error;
1082}
1083
1084static void shmem_evict_inode(struct inode *inode)
1085{
1086 struct shmem_inode_info *info = SHMEM_I(inode);
1087 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1088
1089 if (inode->i_mapping->a_ops == &shmem_aops) {
1090 shmem_unacct_size(info->flags, inode->i_size);
1091 inode->i_size = 0;
1092 shmem_truncate_range(inode, 0, (loff_t)-1);
1093 if (!list_empty(&info->shrinklist)) {
1094 spin_lock(&sbinfo->shrinklist_lock);
1095 if (!list_empty(&info->shrinklist)) {
1096 list_del_init(&info->shrinklist);
1097 sbinfo->shrinklist_len--;
1098 }
1099 spin_unlock(&sbinfo->shrinklist_lock);
1100 }
1101 while (!list_empty(&info->swaplist)) {
1102 /* Wait while shmem_unuse() is scanning this inode... */
1103 wait_var_event(&info->stop_eviction,
1104 !atomic_read(&info->stop_eviction));
1105 mutex_lock(&shmem_swaplist_mutex);
1106 /* ...but beware of the race if we peeked too early */
1107 if (!atomic_read(&info->stop_eviction))
1108 list_del_init(&info->swaplist);
1109 mutex_unlock(&shmem_swaplist_mutex);
1110 }
1111 }
1112
1113 simple_xattrs_free(&info->xattrs);
1114 WARN_ON(inode->i_blocks);
1115 shmem_free_inode(inode->i_sb);
1116 clear_inode(inode);
1117}
1118
1119extern struct swap_info_struct *swap_info[];
1120
1121static int shmem_find_swap_entries(struct address_space *mapping,
1122 pgoff_t start, unsigned int nr_entries,
1123 struct page **entries, pgoff_t *indices,
1124 unsigned int type, bool frontswap)
1125{
1126 XA_STATE(xas, &mapping->i_pages, start);
1127 struct page *page;
1128 swp_entry_t entry;
1129 unsigned int ret = 0;
1130
1131 if (!nr_entries)
1132 return 0;
1133
1134 rcu_read_lock();
1135 xas_for_each(&xas, page, ULONG_MAX) {
1136 if (xas_retry(&xas, page))
1137 continue;
1138
1139 if (!xa_is_value(page))
1140 continue;
1141
1142 entry = radix_to_swp_entry(page);
1143 if (swp_type(entry) != type)
1144 continue;
1145 if (frontswap &&
1146 !frontswap_test(swap_info[type], swp_offset(entry)))
1147 continue;
1148
1149 indices[ret] = xas.xa_index;
1150 entries[ret] = page;
1151
1152 if (need_resched()) {
1153 xas_pause(&xas);
1154 cond_resched_rcu();
1155 }
1156 if (++ret == nr_entries)
1157 break;
1158 }
1159 rcu_read_unlock();
1160
1161 return ret;
1162}
1163
1164/*
1165 * Move the swapped pages for an inode to page cache. Returns the count
1166 * of pages swapped in, or the error in case of failure.
1167 */
1168static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1169 pgoff_t *indices)
1170{
1171 int i = 0;
1172 int ret = 0;
1173 int error = 0;
1174 struct address_space *mapping = inode->i_mapping;
1175
1176 for (i = 0; i < pvec.nr; i++) {
1177 struct page *page = pvec.pages[i];
1178
1179 if (!xa_is_value(page))
1180 continue;
1181 error = shmem_swapin_page(inode, indices[i],
1182 &page, SGP_CACHE,
1183 mapping_gfp_mask(mapping),
1184 NULL, NULL);
1185 if (error == 0) {
1186 unlock_page(page);
1187 put_page(page);
1188 ret++;
1189 }
1190 if (error == -ENOMEM)
1191 break;
1192 error = 0;
1193 }
1194 return error ? error : ret;
1195}
1196
1197/*
1198 * If swap found in inode, free it and move page from swapcache to filecache.
1199 */
1200static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1201 bool frontswap, unsigned long *fs_pages_to_unuse)
1202{
1203 struct address_space *mapping = inode->i_mapping;
1204 pgoff_t start = 0;
1205 struct pagevec pvec;
1206 pgoff_t indices[PAGEVEC_SIZE];
1207 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1208 int ret = 0;
1209
1210 pagevec_init(&pvec);
1211 do {
1212 unsigned int nr_entries = PAGEVEC_SIZE;
1213
1214 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1215 nr_entries = *fs_pages_to_unuse;
1216
1217 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1218 pvec.pages, indices,
1219 type, frontswap);
1220 if (pvec.nr == 0) {
1221 ret = 0;
1222 break;
1223 }
1224
1225 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1226 if (ret < 0)
1227 break;
1228
1229 if (frontswap_partial) {
1230 *fs_pages_to_unuse -= ret;
1231 if (*fs_pages_to_unuse == 0) {
1232 ret = FRONTSWAP_PAGES_UNUSED;
1233 break;
1234 }
1235 }
1236
1237 start = indices[pvec.nr - 1];
1238 } while (true);
1239
1240 return ret;
1241}
1242
1243/*
1244 * Read all the shared memory data that resides in the swap
1245 * device 'type' back into memory, so the swap device can be
1246 * unused.
1247 */
1248int shmem_unuse(unsigned int type, bool frontswap,
1249 unsigned long *fs_pages_to_unuse)
1250{
1251 struct shmem_inode_info *info, *next;
1252 int error = 0;
1253
1254 if (list_empty(&shmem_swaplist))
1255 return 0;
1256
1257 mutex_lock(&shmem_swaplist_mutex);
1258 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1259 if (!info->swapped) {
1260 list_del_init(&info->swaplist);
1261 continue;
1262 }
1263 /*
1264 * Drop the swaplist mutex while searching the inode for swap;
1265 * but before doing so, make sure shmem_evict_inode() will not
1266 * remove placeholder inode from swaplist, nor let it be freed
1267 * (igrab() would protect from unlink, but not from unmount).
1268 */
1269 atomic_inc(&info->stop_eviction);
1270 mutex_unlock(&shmem_swaplist_mutex);
1271
1272 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1273 fs_pages_to_unuse);
1274 cond_resched();
1275
1276 mutex_lock(&shmem_swaplist_mutex);
1277 next = list_next_entry(info, swaplist);
1278 if (!info->swapped)
1279 list_del_init(&info->swaplist);
1280 if (atomic_dec_and_test(&info->stop_eviction))
1281 wake_up_var(&info->stop_eviction);
1282 if (error)
1283 break;
1284 }
1285 mutex_unlock(&shmem_swaplist_mutex);
1286
1287 return error;
1288}
1289
1290/*
1291 * Move the page from the page cache to the swap cache.
1292 */
1293static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1294{
1295 struct shmem_inode_info *info;
1296 struct address_space *mapping;
1297 struct inode *inode;
1298 swp_entry_t swap;
1299 pgoff_t index;
1300
1301 VM_BUG_ON_PAGE(PageCompound(page), page);
1302 BUG_ON(!PageLocked(page));
1303 mapping = page->mapping;
1304 index = page->index;
1305 inode = mapping->host;
1306 info = SHMEM_I(inode);
1307 if (info->flags & VM_LOCKED)
1308 goto redirty;
1309 if (!total_swap_pages)
1310 goto redirty;
1311
1312 /*
1313 * Our capabilities prevent regular writeback or sync from ever calling
1314 * shmem_writepage; but a stacking filesystem might use ->writepage of
1315 * its underlying filesystem, in which case tmpfs should write out to
1316 * swap only in response to memory pressure, and not for the writeback
1317 * threads or sync.
1318 */
1319 if (!wbc->for_reclaim) {
1320 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
1321 goto redirty;
1322 }
1323
1324 /*
1325 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1326 * value into swapfile.c, the only way we can correctly account for a
1327 * fallocated page arriving here is now to initialize it and write it.
1328 *
1329 * That's okay for a page already fallocated earlier, but if we have
1330 * not yet completed the fallocation, then (a) we want to keep track
1331 * of this page in case we have to undo it, and (b) it may not be a
1332 * good idea to continue anyway, once we're pushing into swap. So
1333 * reactivate the page, and let shmem_fallocate() quit when too many.
1334 */
1335 if (!PageUptodate(page)) {
1336 if (inode->i_private) {
1337 struct shmem_falloc *shmem_falloc;
1338 spin_lock(&inode->i_lock);
1339 shmem_falloc = inode->i_private;
1340 if (shmem_falloc &&
1341 !shmem_falloc->waitq &&
1342 index >= shmem_falloc->start &&
1343 index < shmem_falloc->next)
1344 shmem_falloc->nr_unswapped++;
1345 else
1346 shmem_falloc = NULL;
1347 spin_unlock(&inode->i_lock);
1348 if (shmem_falloc)
1349 goto redirty;
1350 }
1351 clear_highpage(page);
1352 flush_dcache_page(page);
1353 SetPageUptodate(page);
1354 }
1355
1356 swap = get_swap_page(page);
1357 if (!swap.val)
1358 goto redirty;
1359
1360 /*
1361 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1362 * if it's not already there. Do it now before the page is
1363 * moved to swap cache, when its pagelock no longer protects
1364 * the inode from eviction. But don't unlock the mutex until
1365 * we've incremented swapped, because shmem_unuse_inode() will
1366 * prune a !swapped inode from the swaplist under this mutex.
1367 */
1368 mutex_lock(&shmem_swaplist_mutex);
1369 if (list_empty(&info->swaplist))
1370 list_add(&info->swaplist, &shmem_swaplist);
1371
1372 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1373 spin_lock_irq(&info->lock);
1374 shmem_recalc_inode(inode);
1375 info->swapped++;
1376 spin_unlock_irq(&info->lock);
1377
1378 swap_shmem_alloc(swap);
1379 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1380
1381 mutex_unlock(&shmem_swaplist_mutex);
1382 BUG_ON(page_mapped(page));
1383 swap_writepage(page, wbc);
1384 return 0;
1385 }
1386
1387 mutex_unlock(&shmem_swaplist_mutex);
1388 put_swap_page(page, swap);
1389redirty:
1390 set_page_dirty(page);
1391 if (wbc->for_reclaim)
1392 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
1393 unlock_page(page);
1394 return 0;
1395}
1396
1397#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1398static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1399{
1400 char buffer[64];
1401
1402 if (!mpol || mpol->mode == MPOL_DEFAULT)
1403 return; /* show nothing */
1404
1405 mpol_to_str(buffer, sizeof(buffer), mpol);
1406
1407 seq_printf(seq, ",mpol=%s", buffer);
1408}
1409
1410static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1411{
1412 struct mempolicy *mpol = NULL;
1413 if (sbinfo->mpol) {
1414 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1415 mpol = sbinfo->mpol;
1416 mpol_get(mpol);
1417 spin_unlock(&sbinfo->stat_lock);
1418 }
1419 return mpol;
1420}
1421#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1422static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1423{
1424}
1425static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1426{
1427 return NULL;
1428}
1429#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1430#ifndef CONFIG_NUMA
1431#define vm_policy vm_private_data
1432#endif
1433
1434static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1435 struct shmem_inode_info *info, pgoff_t index)
1436{
1437 /* Create a pseudo vma that just contains the policy */
1438 vma_init(vma, NULL);
1439 /* Bias interleave by inode number to distribute better across nodes */
1440 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1441 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1442}
1443
1444static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1445{
1446 /* Drop reference taken by mpol_shared_policy_lookup() */
1447 mpol_cond_put(vma->vm_policy);
1448}
1449
1450static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1451 struct shmem_inode_info *info, pgoff_t index)
1452{
1453 struct vm_area_struct pvma;
1454 struct page *page;
1455 struct vm_fault vmf;
1456
1457 shmem_pseudo_vma_init(&pvma, info, index);
1458 vmf.vma = &pvma;
1459 vmf.address = 0;
1460 page = swap_cluster_readahead(swap, gfp, &vmf);
1461 shmem_pseudo_vma_destroy(&pvma);
1462
1463 return page;
1464}
1465
1466static struct page *shmem_alloc_hugepage(gfp_t gfp,
1467 struct shmem_inode_info *info, pgoff_t index)
1468{
1469 struct vm_area_struct pvma;
1470 struct address_space *mapping = info->vfs_inode.i_mapping;
1471 pgoff_t hindex;
1472 struct page *page;
1473
1474 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1475 return NULL;
1476
1477 hindex = round_down(index, HPAGE_PMD_NR);
1478 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1479 XA_PRESENT))
1480 return NULL;
1481
1482 shmem_pseudo_vma_init(&pvma, info, hindex);
1483 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1484 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1485 shmem_pseudo_vma_destroy(&pvma);
1486 if (page)
1487 prep_transhuge_page(page);
1488 return page;
1489}
1490
1491static struct page *shmem_alloc_page(gfp_t gfp,
1492 struct shmem_inode_info *info, pgoff_t index)
1493{
1494 struct vm_area_struct pvma;
1495 struct page *page;
1496
1497 shmem_pseudo_vma_init(&pvma, info, index);
1498 page = alloc_page_vma(gfp, &pvma, 0);
1499 shmem_pseudo_vma_destroy(&pvma);
1500
1501 return page;
1502}
1503
1504static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1505 struct inode *inode,
1506 pgoff_t index, bool huge)
1507{
1508 struct shmem_inode_info *info = SHMEM_I(inode);
1509 struct page *page;
1510 int nr;
1511 int err = -ENOSPC;
1512
1513 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1514 huge = false;
1515 nr = huge ? HPAGE_PMD_NR : 1;
1516
1517 if (!shmem_inode_acct_block(inode, nr))
1518 goto failed;
1519
1520 if (huge)
1521 page = shmem_alloc_hugepage(gfp, info, index);
1522 else
1523 page = shmem_alloc_page(gfp, info, index);
1524 if (page) {
1525 __SetPageLocked(page);
1526 __SetPageSwapBacked(page);
1527 return page;
1528 }
1529
1530 err = -ENOMEM;
1531 shmem_inode_unacct_blocks(inode, nr);
1532failed:
1533 return ERR_PTR(err);
1534}
1535
1536/*
1537 * When a page is moved from swapcache to shmem filecache (either by the
1538 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1539 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1540 * ignorance of the mapping it belongs to. If that mapping has special
1541 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1542 * we may need to copy to a suitable page before moving to filecache.
1543 *
1544 * In a future release, this may well be extended to respect cpuset and
1545 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1546 * but for now it is a simple matter of zone.
1547 */
1548static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1549{
1550 return page_zonenum(page) > gfp_zone(gfp);
1551}
1552
1553static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1554 struct shmem_inode_info *info, pgoff_t index)
1555{
1556 struct page *oldpage, *newpage;
1557 struct address_space *swap_mapping;
1558 swp_entry_t entry;
1559 pgoff_t swap_index;
1560 int error;
1561
1562 oldpage = *pagep;
1563 entry.val = page_private(oldpage);
1564 swap_index = swp_offset(entry);
1565 swap_mapping = page_mapping(oldpage);
1566
1567 /*
1568 * We have arrived here because our zones are constrained, so don't
1569 * limit chance of success by further cpuset and node constraints.
1570 */
1571 gfp &= ~GFP_CONSTRAINT_MASK;
1572 newpage = shmem_alloc_page(gfp, info, index);
1573 if (!newpage)
1574 return -ENOMEM;
1575
1576 get_page(newpage);
1577 copy_highpage(newpage, oldpage);
1578 flush_dcache_page(newpage);
1579
1580 __SetPageLocked(newpage);
1581 __SetPageSwapBacked(newpage);
1582 SetPageUptodate(newpage);
1583 set_page_private(newpage, entry.val);
1584 SetPageSwapCache(newpage);
1585
1586 /*
1587 * Our caller will very soon move newpage out of swapcache, but it's
1588 * a nice clean interface for us to replace oldpage by newpage there.
1589 */
1590 xa_lock_irq(&swap_mapping->i_pages);
1591 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1592 if (!error) {
1593 __inc_node_page_state(newpage, NR_FILE_PAGES);
1594 __dec_node_page_state(oldpage, NR_FILE_PAGES);
1595 }
1596 xa_unlock_irq(&swap_mapping->i_pages);
1597
1598 if (unlikely(error)) {
1599 /*
1600 * Is this possible? I think not, now that our callers check
1601 * both PageSwapCache and page_private after getting page lock;
1602 * but be defensive. Reverse old to newpage for clear and free.
1603 */
1604 oldpage = newpage;
1605 } else {
1606 mem_cgroup_migrate(oldpage, newpage);
1607 lru_cache_add_anon(newpage);
1608 *pagep = newpage;
1609 }
1610
1611 ClearPageSwapCache(oldpage);
1612 set_page_private(oldpage, 0);
1613
1614 unlock_page(oldpage);
1615 put_page(oldpage);
1616 put_page(oldpage);
1617 return error;
1618}
1619
1620/*
1621 * Swap in the page pointed to by *pagep.
1622 * Caller has to make sure that *pagep contains a valid swapped page.
1623 * Returns 0 and the page in pagep if success. On failure, returns the
1624 * the error code and NULL in *pagep.
1625 */
1626static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1627 struct page **pagep, enum sgp_type sgp,
1628 gfp_t gfp, struct vm_area_struct *vma,
1629 vm_fault_t *fault_type)
1630{
1631 struct address_space *mapping = inode->i_mapping;
1632 struct shmem_inode_info *info = SHMEM_I(inode);
1633 struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
1634 struct mem_cgroup *memcg;
1635 struct page *page;
1636 swp_entry_t swap;
1637 int error;
1638
1639 VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1640 swap = radix_to_swp_entry(*pagep);
1641 *pagep = NULL;
1642
1643 /* Look it up and read it in.. */
1644 page = lookup_swap_cache(swap, NULL, 0);
1645 if (!page) {
1646 /* Or update major stats only when swapin succeeds?? */
1647 if (fault_type) {
1648 *fault_type |= VM_FAULT_MAJOR;
1649 count_vm_event(PGMAJFAULT);
1650 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1651 }
1652 /* Here we actually start the io */
1653 page = shmem_swapin(swap, gfp, info, index);
1654 if (!page) {
1655 error = -ENOMEM;
1656 goto failed;
1657 }
1658 }
1659
1660 /* We have to do this with page locked to prevent races */
1661 lock_page(page);
1662 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1663 !shmem_confirm_swap(mapping, index, swap)) {
1664 error = -EEXIST;
1665 goto unlock;
1666 }
1667 if (!PageUptodate(page)) {
1668 error = -EIO;
1669 goto failed;
1670 }
1671 wait_on_page_writeback(page);
1672
1673 if (shmem_should_replace_page(page, gfp)) {
1674 error = shmem_replace_page(&page, gfp, info, index);
1675 if (error)
1676 goto failed;
1677 }
1678
1679 error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1680 false);
1681 if (!error) {
1682 error = shmem_add_to_page_cache(page, mapping, index,
1683 swp_to_radix_entry(swap), gfp);
1684 /*
1685 * We already confirmed swap under page lock, and make
1686 * no memory allocation here, so usually no possibility
1687 * of error; but free_swap_and_cache() only trylocks a
1688 * page, so it is just possible that the entry has been
1689 * truncated or holepunched since swap was confirmed.
1690 * shmem_undo_range() will have done some of the
1691 * unaccounting, now delete_from_swap_cache() will do
1692 * the rest.
1693 */
1694 if (error) {
1695 mem_cgroup_cancel_charge(page, memcg, false);
1696 delete_from_swap_cache(page);
1697 }
1698 }
1699 if (error)
1700 goto failed;
1701
1702 mem_cgroup_commit_charge(page, memcg, true, false);
1703
1704 spin_lock_irq(&info->lock);
1705 info->swapped--;
1706 shmem_recalc_inode(inode);
1707 spin_unlock_irq(&info->lock);
1708
1709 if (sgp == SGP_WRITE)
1710 mark_page_accessed(page);
1711
1712 delete_from_swap_cache(page);
1713 set_page_dirty(page);
1714 swap_free(swap);
1715
1716 *pagep = page;
1717 return 0;
1718failed:
1719 if (!shmem_confirm_swap(mapping, index, swap))
1720 error = -EEXIST;
1721unlock:
1722 if (page) {
1723 unlock_page(page);
1724 put_page(page);
1725 }
1726
1727 return error;
1728}
1729
1730/*
1731 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1732 *
1733 * If we allocate a new one we do not mark it dirty. That's up to the
1734 * vm. If we swap it in we mark it dirty since we also free the swap
1735 * entry since a page cannot live in both the swap and page cache.
1736 *
1737 * vmf and fault_type are only supplied by shmem_fault:
1738 * otherwise they are NULL.
1739 */
1740static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1741 struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1742 struct vm_area_struct *vma, struct vm_fault *vmf,
1743 vm_fault_t *fault_type)
1744{
1745 struct address_space *mapping = inode->i_mapping;
1746 struct shmem_inode_info *info = SHMEM_I(inode);
1747 struct shmem_sb_info *sbinfo;
1748 struct mm_struct *charge_mm;
1749 struct mem_cgroup *memcg;
1750 struct page *page;
1751 enum sgp_type sgp_huge = sgp;
1752 pgoff_t hindex = index;
1753 int error;
1754 int once = 0;
1755 int alloced = 0;
1756
1757 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1758 return -EFBIG;
1759 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1760 sgp = SGP_CACHE;
1761repeat:
1762 if (sgp <= SGP_CACHE &&
1763 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1764 return -EINVAL;
1765 }
1766
1767 sbinfo = SHMEM_SB(inode->i_sb);
1768 charge_mm = vma ? vma->vm_mm : current->mm;
1769
1770 page = find_lock_entry(mapping, index);
1771 if (xa_is_value(page)) {
1772 error = shmem_swapin_page(inode, index, &page,
1773 sgp, gfp, vma, fault_type);
1774 if (error == -EEXIST)
1775 goto repeat;
1776
1777 *pagep = page;
1778 return error;
1779 }
1780
1781 if (page && sgp == SGP_WRITE)
1782 mark_page_accessed(page);
1783
1784 /* fallocated page? */
1785 if (page && !PageUptodate(page)) {
1786 if (sgp != SGP_READ)
1787 goto clear;
1788 unlock_page(page);
1789 put_page(page);
1790 page = NULL;
1791 }
1792 if (page || sgp == SGP_READ) {
1793 *pagep = page;
1794 return 0;
1795 }
1796
1797 /*
1798 * Fast cache lookup did not find it:
1799 * bring it back from swap or allocate.
1800 */
1801
1802 if (vma && userfaultfd_missing(vma)) {
1803 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1804 return 0;
1805 }
1806
1807 /* shmem_symlink() */
1808 if (mapping->a_ops != &shmem_aops)
1809 goto alloc_nohuge;
1810 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1811 goto alloc_nohuge;
1812 if (shmem_huge == SHMEM_HUGE_FORCE)
1813 goto alloc_huge;
1814 switch (sbinfo->huge) {
1815 loff_t i_size;
1816 pgoff_t off;
1817 case SHMEM_HUGE_NEVER:
1818 goto alloc_nohuge;
1819 case SHMEM_HUGE_WITHIN_SIZE:
1820 off = round_up(index, HPAGE_PMD_NR);
1821 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1822 if (i_size >= HPAGE_PMD_SIZE &&
1823 i_size >> PAGE_SHIFT >= off)
1824 goto alloc_huge;
1825 /* fallthrough */
1826 case SHMEM_HUGE_ADVISE:
1827 if (sgp_huge == SGP_HUGE)
1828 goto alloc_huge;
1829 /* TODO: implement fadvise() hints */
1830 goto alloc_nohuge;
1831 }
1832
1833alloc_huge:
1834 page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1835 if (IS_ERR(page)) {
1836alloc_nohuge:
1837 page = shmem_alloc_and_acct_page(gfp, inode,
1838 index, false);
1839 }
1840 if (IS_ERR(page)) {
1841 int retry = 5;
1842
1843 error = PTR_ERR(page);
1844 page = NULL;
1845 if (error != -ENOSPC)
1846 goto unlock;
1847 /*
1848 * Try to reclaim some space by splitting a huge page
1849 * beyond i_size on the filesystem.
1850 */
1851 while (retry--) {
1852 int ret;
1853
1854 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1855 if (ret == SHRINK_STOP)
1856 break;
1857 if (ret)
1858 goto alloc_nohuge;
1859 }
1860 goto unlock;
1861 }
1862
1863 if (PageTransHuge(page))
1864 hindex = round_down(index, HPAGE_PMD_NR);
1865 else
1866 hindex = index;
1867
1868 if (sgp == SGP_WRITE)
1869 __SetPageReferenced(page);
1870
1871 error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1872 PageTransHuge(page));
1873 if (error)
1874 goto unacct;
1875 error = shmem_add_to_page_cache(page, mapping, hindex,
1876 NULL, gfp & GFP_RECLAIM_MASK);
1877 if (error) {
1878 mem_cgroup_cancel_charge(page, memcg,
1879 PageTransHuge(page));
1880 goto unacct;
1881 }
1882 mem_cgroup_commit_charge(page, memcg, false,
1883 PageTransHuge(page));
1884 lru_cache_add_anon(page);
1885
1886 spin_lock_irq(&info->lock);
1887 info->alloced += compound_nr(page);
1888 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1889 shmem_recalc_inode(inode);
1890 spin_unlock_irq(&info->lock);
1891 alloced = true;
1892
1893 if (PageTransHuge(page) &&
1894 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1895 hindex + HPAGE_PMD_NR - 1) {
1896 /*
1897 * Part of the huge page is beyond i_size: subject
1898 * to shrink under memory pressure.
1899 */
1900 spin_lock(&sbinfo->shrinklist_lock);
1901 /*
1902 * _careful to defend against unlocked access to
1903 * ->shrink_list in shmem_unused_huge_shrink()
1904 */
1905 if (list_empty_careful(&info->shrinklist)) {
1906 list_add_tail(&info->shrinklist,
1907 &sbinfo->shrinklist);
1908 sbinfo->shrinklist_len++;
1909 }
1910 spin_unlock(&sbinfo->shrinklist_lock);
1911 }
1912
1913 /*
1914 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1915 */
1916 if (sgp == SGP_FALLOC)
1917 sgp = SGP_WRITE;
1918clear:
1919 /*
1920 * Let SGP_WRITE caller clear ends if write does not fill page;
1921 * but SGP_FALLOC on a page fallocated earlier must initialize
1922 * it now, lest undo on failure cancel our earlier guarantee.
1923 */
1924 if (sgp != SGP_WRITE && !PageUptodate(page)) {
1925 struct page *head = compound_head(page);
1926 int i;
1927
1928 for (i = 0; i < compound_nr(head); i++) {
1929 clear_highpage(head + i);
1930 flush_dcache_page(head + i);
1931 }
1932 SetPageUptodate(head);
1933 }
1934
1935 /* Perhaps the file has been truncated since we checked */
1936 if (sgp <= SGP_CACHE &&
1937 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1938 if (alloced) {
1939 ClearPageDirty(page);
1940 delete_from_page_cache(page);
1941 spin_lock_irq(&info->lock);
1942 shmem_recalc_inode(inode);
1943 spin_unlock_irq(&info->lock);
1944 }
1945 error = -EINVAL;
1946 goto unlock;
1947 }
1948 *pagep = page + index - hindex;
1949 return 0;
1950
1951 /*
1952 * Error recovery.
1953 */
1954unacct:
1955 shmem_inode_unacct_blocks(inode, compound_nr(page));
1956
1957 if (PageTransHuge(page)) {
1958 unlock_page(page);
1959 put_page(page);
1960 goto alloc_nohuge;
1961 }
1962unlock:
1963 if (page) {
1964 unlock_page(page);
1965 put_page(page);
1966 }
1967 if (error == -ENOSPC && !once++) {
1968 spin_lock_irq(&info->lock);
1969 shmem_recalc_inode(inode);
1970 spin_unlock_irq(&info->lock);
1971 goto repeat;
1972 }
1973 if (error == -EEXIST)
1974 goto repeat;
1975 return error;
1976}
1977
1978/*
1979 * This is like autoremove_wake_function, but it removes the wait queue
1980 * entry unconditionally - even if something else had already woken the
1981 * target.
1982 */
1983static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1984{
1985 int ret = default_wake_function(wait, mode, sync, key);
1986 list_del_init(&wait->entry);
1987 return ret;
1988}
1989
1990static vm_fault_t shmem_fault(struct vm_fault *vmf)
1991{
1992 struct vm_area_struct *vma = vmf->vma;
1993 struct inode *inode = file_inode(vma->vm_file);
1994 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1995 enum sgp_type sgp;
1996 int err;
1997 vm_fault_t ret = VM_FAULT_LOCKED;
1998
1999 /*
2000 * Trinity finds that probing a hole which tmpfs is punching can
2001 * prevent the hole-punch from ever completing: which in turn
2002 * locks writers out with its hold on i_mutex. So refrain from
2003 * faulting pages into the hole while it's being punched. Although
2004 * shmem_undo_range() does remove the additions, it may be unable to
2005 * keep up, as each new page needs its own unmap_mapping_range() call,
2006 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2007 *
2008 * It does not matter if we sometimes reach this check just before the
2009 * hole-punch begins, so that one fault then races with the punch:
2010 * we just need to make racing faults a rare case.
2011 *
2012 * The implementation below would be much simpler if we just used a
2013 * standard mutex or completion: but we cannot take i_mutex in fault,
2014 * and bloating every shmem inode for this unlikely case would be sad.
2015 */
2016 if (unlikely(inode->i_private)) {
2017 struct shmem_falloc *shmem_falloc;
2018
2019 spin_lock(&inode->i_lock);
2020 shmem_falloc = inode->i_private;
2021 if (shmem_falloc &&
2022 shmem_falloc->waitq &&
2023 vmf->pgoff >= shmem_falloc->start &&
2024 vmf->pgoff < shmem_falloc->next) {
2025 wait_queue_head_t *shmem_falloc_waitq;
2026 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2027
2028 ret = VM_FAULT_NOPAGE;
2029 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
2030 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
2031 /* It's polite to up mmap_sem if we can */
2032 up_read(&vma->vm_mm->mmap_sem);
2033 ret = VM_FAULT_RETRY;
2034 }
2035
2036 shmem_falloc_waitq = shmem_falloc->waitq;
2037 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2038 TASK_UNINTERRUPTIBLE);
2039 spin_unlock(&inode->i_lock);
2040 schedule();
2041
2042 /*
2043 * shmem_falloc_waitq points into the shmem_fallocate()
2044 * stack of the hole-punching task: shmem_falloc_waitq
2045 * is usually invalid by the time we reach here, but
2046 * finish_wait() does not dereference it in that case;
2047 * though i_lock needed lest racing with wake_up_all().
2048 */
2049 spin_lock(&inode->i_lock);
2050 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2051 spin_unlock(&inode->i_lock);
2052 return ret;
2053 }
2054 spin_unlock(&inode->i_lock);
2055 }
2056
2057 sgp = SGP_CACHE;
2058
2059 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2060 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2061 sgp = SGP_NOHUGE;
2062 else if (vma->vm_flags & VM_HUGEPAGE)
2063 sgp = SGP_HUGE;
2064
2065 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2066 gfp, vma, vmf, &ret);
2067 if (err)
2068 return vmf_error(err);
2069 return ret;
2070}
2071
2072unsigned long shmem_get_unmapped_area(struct file *file,
2073 unsigned long uaddr, unsigned long len,
2074 unsigned long pgoff, unsigned long flags)
2075{
2076 unsigned long (*get_area)(struct file *,
2077 unsigned long, unsigned long, unsigned long, unsigned long);
2078 unsigned long addr;
2079 unsigned long offset;
2080 unsigned long inflated_len;
2081 unsigned long inflated_addr;
2082 unsigned long inflated_offset;
2083
2084 if (len > TASK_SIZE)
2085 return -ENOMEM;
2086
2087 get_area = current->mm->get_unmapped_area;
2088 addr = get_area(file, uaddr, len, pgoff, flags);
2089
2090 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2091 return addr;
2092 if (IS_ERR_VALUE(addr))
2093 return addr;
2094 if (addr & ~PAGE_MASK)
2095 return addr;
2096 if (addr > TASK_SIZE - len)
2097 return addr;
2098
2099 if (shmem_huge == SHMEM_HUGE_DENY)
2100 return addr;
2101 if (len < HPAGE_PMD_SIZE)
2102 return addr;
2103 if (flags & MAP_FIXED)
2104 return addr;
2105 /*
2106 * Our priority is to support MAP_SHARED mapped hugely;
2107 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2108 * But if caller specified an address hint, respect that as before.
2109 */
2110 if (uaddr)
2111 return addr;
2112
2113 if (shmem_huge != SHMEM_HUGE_FORCE) {
2114 struct super_block *sb;
2115
2116 if (file) {
2117 VM_BUG_ON(file->f_op != &shmem_file_operations);
2118 sb = file_inode(file)->i_sb;
2119 } else {
2120 /*
2121 * Called directly from mm/mmap.c, or drivers/char/mem.c
2122 * for "/dev/zero", to create a shared anonymous object.
2123 */
2124 if (IS_ERR(shm_mnt))
2125 return addr;
2126 sb = shm_mnt->mnt_sb;
2127 }
2128 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2129 return addr;
2130 }
2131
2132 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2133 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2134 return addr;
2135 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2136 return addr;
2137
2138 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2139 if (inflated_len > TASK_SIZE)
2140 return addr;
2141 if (inflated_len < len)
2142 return addr;
2143
2144 inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2145 if (IS_ERR_VALUE(inflated_addr))
2146 return addr;
2147 if (inflated_addr & ~PAGE_MASK)
2148 return addr;
2149
2150 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2151 inflated_addr += offset - inflated_offset;
2152 if (inflated_offset > offset)
2153 inflated_addr += HPAGE_PMD_SIZE;
2154
2155 if (inflated_addr > TASK_SIZE - len)
2156 return addr;
2157 return inflated_addr;
2158}
2159
2160#ifdef CONFIG_NUMA
2161static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2162{
2163 struct inode *inode = file_inode(vma->vm_file);
2164 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2165}
2166
2167static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2168 unsigned long addr)
2169{
2170 struct inode *inode = file_inode(vma->vm_file);
2171 pgoff_t index;
2172
2173 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2174 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2175}
2176#endif
2177
2178int shmem_lock(struct file *file, int lock, struct user_struct *user)
2179{
2180 struct inode *inode = file_inode(file);
2181 struct shmem_inode_info *info = SHMEM_I(inode);
2182 int retval = -ENOMEM;
2183
2184 spin_lock_irq(&info->lock);
2185 if (lock && !(info->flags & VM_LOCKED)) {
2186 if (!user_shm_lock(inode->i_size, user))
2187 goto out_nomem;
2188 info->flags |= VM_LOCKED;
2189 mapping_set_unevictable(file->f_mapping);
2190 }
2191 if (!lock && (info->flags & VM_LOCKED) && user) {
2192 user_shm_unlock(inode->i_size, user);
2193 info->flags &= ~VM_LOCKED;
2194 mapping_clear_unevictable(file->f_mapping);
2195 }
2196 retval = 0;
2197
2198out_nomem:
2199 spin_unlock_irq(&info->lock);
2200 return retval;
2201}
2202
2203static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2204{
2205 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2206
2207 if (info->seals & F_SEAL_FUTURE_WRITE) {
2208 /*
2209 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2210 * "future write" seal active.
2211 */
2212 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2213 return -EPERM;
2214
2215 /*
2216 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
2217 * read-only mapping, take care to not allow mprotect to revert
2218 * protections.
2219 */
2220 vma->vm_flags &= ~(VM_MAYWRITE);
2221 }
2222
2223 file_accessed(file);
2224 vma->vm_ops = &shmem_vm_ops;
2225 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2226 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2227 (vma->vm_end & HPAGE_PMD_MASK)) {
2228 khugepaged_enter(vma, vma->vm_flags);
2229 }
2230 return 0;
2231}
2232
2233static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2234 umode_t mode, dev_t dev, unsigned long flags)
2235{
2236 struct inode *inode;
2237 struct shmem_inode_info *info;
2238 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2239
2240 if (shmem_reserve_inode(sb))
2241 return NULL;
2242
2243 inode = new_inode(sb);
2244 if (inode) {
2245 inode->i_ino = get_next_ino();
2246 inode_init_owner(inode, dir, mode);
2247 inode->i_blocks = 0;
2248 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2249 inode->i_generation = prandom_u32();
2250 info = SHMEM_I(inode);
2251 memset(info, 0, (char *)inode - (char *)info);
2252 spin_lock_init(&info->lock);
2253 atomic_set(&info->stop_eviction, 0);
2254 info->seals = F_SEAL_SEAL;
2255 info->flags = flags & VM_NORESERVE;
2256 INIT_LIST_HEAD(&info->shrinklist);
2257 INIT_LIST_HEAD(&info->swaplist);
2258 simple_xattrs_init(&info->xattrs);
2259 cache_no_acl(inode);
2260
2261 switch (mode & S_IFMT) {
2262 default:
2263 inode->i_op = &shmem_special_inode_operations;
2264 init_special_inode(inode, mode, dev);
2265 break;
2266 case S_IFREG:
2267 inode->i_mapping->a_ops = &shmem_aops;
2268 inode->i_op = &shmem_inode_operations;
2269 inode->i_fop = &shmem_file_operations;
2270 mpol_shared_policy_init(&info->policy,
2271 shmem_get_sbmpol(sbinfo));
2272 break;
2273 case S_IFDIR:
2274 inc_nlink(inode);
2275 /* Some things misbehave if size == 0 on a directory */
2276 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2277 inode->i_op = &shmem_dir_inode_operations;
2278 inode->i_fop = &simple_dir_operations;
2279 break;
2280 case S_IFLNK:
2281 /*
2282 * Must not load anything in the rbtree,
2283 * mpol_free_shared_policy will not be called.
2284 */
2285 mpol_shared_policy_init(&info->policy, NULL);
2286 break;
2287 }
2288
2289 lockdep_annotate_inode_mutex_key(inode);
2290 } else
2291 shmem_free_inode(sb);
2292 return inode;
2293}
2294
2295bool shmem_mapping(struct address_space *mapping)
2296{
2297 return mapping->a_ops == &shmem_aops;
2298}
2299
2300static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2301 pmd_t *dst_pmd,
2302 struct vm_area_struct *dst_vma,
2303 unsigned long dst_addr,
2304 unsigned long src_addr,
2305 bool zeropage,
2306 struct page **pagep)
2307{
2308 struct inode *inode = file_inode(dst_vma->vm_file);
2309 struct shmem_inode_info *info = SHMEM_I(inode);
2310 struct address_space *mapping = inode->i_mapping;
2311 gfp_t gfp = mapping_gfp_mask(mapping);
2312 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2313 struct mem_cgroup *memcg;
2314 spinlock_t *ptl;
2315 void *page_kaddr;
2316 struct page *page;
2317 pte_t _dst_pte, *dst_pte;
2318 int ret;
2319 pgoff_t offset, max_off;
2320
2321 ret = -ENOMEM;
2322 if (!shmem_inode_acct_block(inode, 1))
2323 goto out;
2324
2325 if (!*pagep) {
2326 page = shmem_alloc_page(gfp, info, pgoff);
2327 if (!page)
2328 goto out_unacct_blocks;
2329
2330 if (!zeropage) { /* mcopy_atomic */
2331 page_kaddr = kmap_atomic(page);
2332 ret = copy_from_user(page_kaddr,
2333 (const void __user *)src_addr,
2334 PAGE_SIZE);
2335 kunmap_atomic(page_kaddr);
2336
2337 /* fallback to copy_from_user outside mmap_sem */
2338 if (unlikely(ret)) {
2339 *pagep = page;
2340 shmem_inode_unacct_blocks(inode, 1);
2341 /* don't free the page */
2342 return -ENOENT;
2343 }
2344 } else { /* mfill_zeropage_atomic */
2345 clear_highpage(page);
2346 }
2347 } else {
2348 page = *pagep;
2349 *pagep = NULL;
2350 }
2351
2352 VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2353 __SetPageLocked(page);
2354 __SetPageSwapBacked(page);
2355 __SetPageUptodate(page);
2356
2357 ret = -EFAULT;
2358 offset = linear_page_index(dst_vma, dst_addr);
2359 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2360 if (unlikely(offset >= max_off))
2361 goto out_release;
2362
2363 ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
2364 if (ret)
2365 goto out_release;
2366
2367 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2368 gfp & GFP_RECLAIM_MASK);
2369 if (ret)
2370 goto out_release_uncharge;
2371
2372 mem_cgroup_commit_charge(page, memcg, false, false);
2373
2374 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2375 if (dst_vma->vm_flags & VM_WRITE)
2376 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2377 else {
2378 /*
2379 * We don't set the pte dirty if the vma has no
2380 * VM_WRITE permission, so mark the page dirty or it
2381 * could be freed from under us. We could do it
2382 * unconditionally before unlock_page(), but doing it
2383 * only if VM_WRITE is not set is faster.
2384 */
2385 set_page_dirty(page);
2386 }
2387
2388 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2389
2390 ret = -EFAULT;
2391 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2392 if (unlikely(offset >= max_off))
2393 goto out_release_uncharge_unlock;
2394
2395 ret = -EEXIST;
2396 if (!pte_none(*dst_pte))
2397 goto out_release_uncharge_unlock;
2398
2399 lru_cache_add_anon(page);
2400
2401 spin_lock(&info->lock);
2402 info->alloced++;
2403 inode->i_blocks += BLOCKS_PER_PAGE;
2404 shmem_recalc_inode(inode);
2405 spin_unlock(&info->lock);
2406
2407 inc_mm_counter(dst_mm, mm_counter_file(page));
2408 page_add_file_rmap(page, false);
2409 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2410
2411 /* No need to invalidate - it was non-present before */
2412 update_mmu_cache(dst_vma, dst_addr, dst_pte);
2413 pte_unmap_unlock(dst_pte, ptl);
2414 unlock_page(page);
2415 ret = 0;
2416out:
2417 return ret;
2418out_release_uncharge_unlock:
2419 pte_unmap_unlock(dst_pte, ptl);
2420 ClearPageDirty(page);
2421 delete_from_page_cache(page);
2422out_release_uncharge:
2423 mem_cgroup_cancel_charge(page, memcg, false);
2424out_release:
2425 unlock_page(page);
2426 put_page(page);
2427out_unacct_blocks:
2428 shmem_inode_unacct_blocks(inode, 1);
2429 goto out;
2430}
2431
2432int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2433 pmd_t *dst_pmd,
2434 struct vm_area_struct *dst_vma,
2435 unsigned long dst_addr,
2436 unsigned long src_addr,
2437 struct page **pagep)
2438{
2439 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2440 dst_addr, src_addr, false, pagep);
2441}
2442
2443int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2444 pmd_t *dst_pmd,
2445 struct vm_area_struct *dst_vma,
2446 unsigned long dst_addr)
2447{
2448 struct page *page = NULL;
2449
2450 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2451 dst_addr, 0, true, &page);
2452}
2453
2454#ifdef CONFIG_TMPFS
2455static const struct inode_operations shmem_symlink_inode_operations;
2456static const struct inode_operations shmem_short_symlink_operations;
2457
2458#ifdef CONFIG_TMPFS_XATTR
2459static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2460#else
2461#define shmem_initxattrs NULL
2462#endif
2463
2464static int
2465shmem_write_begin(struct file *file, struct address_space *mapping,
2466 loff_t pos, unsigned len, unsigned flags,
2467 struct page **pagep, void **fsdata)
2468{
2469 struct inode *inode = mapping->host;
2470 struct shmem_inode_info *info = SHMEM_I(inode);
2471 pgoff_t index = pos >> PAGE_SHIFT;
2472
2473 /* i_mutex is held by caller */
2474 if (unlikely(info->seals & (F_SEAL_GROW |
2475 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2476 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2477 return -EPERM;
2478 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2479 return -EPERM;
2480 }
2481
2482 return shmem_getpage(inode, index, pagep, SGP_WRITE);
2483}
2484
2485static int
2486shmem_write_end(struct file *file, struct address_space *mapping,
2487 loff_t pos, unsigned len, unsigned copied,
2488 struct page *page, void *fsdata)
2489{
2490 struct inode *inode = mapping->host;
2491
2492 if (pos + copied > inode->i_size)
2493 i_size_write(inode, pos + copied);
2494
2495 if (!PageUptodate(page)) {
2496 struct page *head = compound_head(page);
2497 if (PageTransCompound(page)) {
2498 int i;
2499
2500 for (i = 0; i < HPAGE_PMD_NR; i++) {
2501 if (head + i == page)
2502 continue;
2503 clear_highpage(head + i);
2504 flush_dcache_page(head + i);
2505 }
2506 }
2507 if (copied < PAGE_SIZE) {
2508 unsigned from = pos & (PAGE_SIZE - 1);
2509 zero_user_segments(page, 0, from,
2510 from + copied, PAGE_SIZE);
2511 }
2512 SetPageUptodate(head);
2513 }
2514 set_page_dirty(page);
2515 unlock_page(page);
2516 put_page(page);
2517
2518 return copied;
2519}
2520
2521static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2522{
2523 struct file *file = iocb->ki_filp;
2524 struct inode *inode = file_inode(file);
2525 struct address_space *mapping = inode->i_mapping;
2526 pgoff_t index;
2527 unsigned long offset;
2528 enum sgp_type sgp = SGP_READ;
2529 int error = 0;
2530 ssize_t retval = 0;
2531 loff_t *ppos = &iocb->ki_pos;
2532
2533 /*
2534 * Might this read be for a stacking filesystem? Then when reading
2535 * holes of a sparse file, we actually need to allocate those pages,
2536 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2537 */
2538 if (!iter_is_iovec(to))
2539 sgp = SGP_CACHE;
2540
2541 index = *ppos >> PAGE_SHIFT;
2542 offset = *ppos & ~PAGE_MASK;
2543
2544 for (;;) {
2545 struct page *page = NULL;
2546 pgoff_t end_index;
2547 unsigned long nr, ret;
2548 loff_t i_size = i_size_read(inode);
2549
2550 end_index = i_size >> PAGE_SHIFT;
2551 if (index > end_index)
2552 break;
2553 if (index == end_index) {
2554 nr = i_size & ~PAGE_MASK;
2555 if (nr <= offset)
2556 break;
2557 }
2558
2559 error = shmem_getpage(inode, index, &page, sgp);
2560 if (error) {
2561 if (error == -EINVAL)
2562 error = 0;
2563 break;
2564 }
2565 if (page) {
2566 if (sgp == SGP_CACHE)
2567 set_page_dirty(page);
2568 unlock_page(page);
2569 }
2570
2571 /*
2572 * We must evaluate after, since reads (unlike writes)
2573 * are called without i_mutex protection against truncate
2574 */
2575 nr = PAGE_SIZE;
2576 i_size = i_size_read(inode);
2577 end_index = i_size >> PAGE_SHIFT;
2578 if (index == end_index) {
2579 nr = i_size & ~PAGE_MASK;
2580 if (nr <= offset) {
2581 if (page)
2582 put_page(page);
2583 break;
2584 }
2585 }
2586 nr -= offset;
2587
2588 if (page) {
2589 /*
2590 * If users can be writing to this page using arbitrary
2591 * virtual addresses, take care about potential aliasing
2592 * before reading the page on the kernel side.
2593 */
2594 if (mapping_writably_mapped(mapping))
2595 flush_dcache_page(page);
2596 /*
2597 * Mark the page accessed if we read the beginning.
2598 */
2599 if (!offset)
2600 mark_page_accessed(page);
2601 } else {
2602 page = ZERO_PAGE(0);
2603 get_page(page);
2604 }
2605
2606 /*
2607 * Ok, we have the page, and it's up-to-date, so
2608 * now we can copy it to user space...
2609 */
2610 ret = copy_page_to_iter(page, offset, nr, to);
2611 retval += ret;
2612 offset += ret;
2613 index += offset >> PAGE_SHIFT;
2614 offset &= ~PAGE_MASK;
2615
2616 put_page(page);
2617 if (!iov_iter_count(to))
2618 break;
2619 if (ret < nr) {
2620 error = -EFAULT;
2621 break;
2622 }
2623 cond_resched();
2624 }
2625
2626 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2627 file_accessed(file);
2628 return retval ? retval : error;
2629}
2630
2631/*
2632 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2633 */
2634static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2635 pgoff_t index, pgoff_t end, int whence)
2636{
2637 struct page *page;
2638 struct pagevec pvec;
2639 pgoff_t indices[PAGEVEC_SIZE];
2640 bool done = false;
2641 int i;
2642
2643 pagevec_init(&pvec);
2644 pvec.nr = 1; /* start small: we may be there already */
2645 while (!done) {
2646 pvec.nr = find_get_entries(mapping, index,
2647 pvec.nr, pvec.pages, indices);
2648 if (!pvec.nr) {
2649 if (whence == SEEK_DATA)
2650 index = end;
2651 break;
2652 }
2653 for (i = 0; i < pvec.nr; i++, index++) {
2654 if (index < indices[i]) {
2655 if (whence == SEEK_HOLE) {
2656 done = true;
2657 break;
2658 }
2659 index = indices[i];
2660 }
2661 page = pvec.pages[i];
2662 if (page && !xa_is_value(page)) {
2663 if (!PageUptodate(page))
2664 page = NULL;
2665 }
2666 if (index >= end ||
2667 (page && whence == SEEK_DATA) ||
2668 (!page && whence == SEEK_HOLE)) {
2669 done = true;
2670 break;
2671 }
2672 }
2673 pagevec_remove_exceptionals(&pvec);
2674 pagevec_release(&pvec);
2675 pvec.nr = PAGEVEC_SIZE;
2676 cond_resched();
2677 }
2678 return index;
2679}
2680
2681static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2682{
2683 struct address_space *mapping = file->f_mapping;
2684 struct inode *inode = mapping->host;
2685 pgoff_t start, end;
2686 loff_t new_offset;
2687
2688 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2689 return generic_file_llseek_size(file, offset, whence,
2690 MAX_LFS_FILESIZE, i_size_read(inode));
2691 inode_lock(inode);
2692 /* We're holding i_mutex so we can access i_size directly */
2693
2694 if (offset < 0 || offset >= inode->i_size)
2695 offset = -ENXIO;
2696 else {
2697 start = offset >> PAGE_SHIFT;
2698 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2699 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2700 new_offset <<= PAGE_SHIFT;
2701 if (new_offset > offset) {
2702 if (new_offset < inode->i_size)
2703 offset = new_offset;
2704 else if (whence == SEEK_DATA)
2705 offset = -ENXIO;
2706 else
2707 offset = inode->i_size;
2708 }
2709 }
2710
2711 if (offset >= 0)
2712 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2713 inode_unlock(inode);
2714 return offset;
2715}
2716
2717static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2718 loff_t len)
2719{
2720 struct inode *inode = file_inode(file);
2721 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2722 struct shmem_inode_info *info = SHMEM_I(inode);
2723 struct shmem_falloc shmem_falloc;
2724 pgoff_t start, index, end;
2725 int error;
2726
2727 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2728 return -EOPNOTSUPP;
2729
2730 inode_lock(inode);
2731
2732 if (mode & FALLOC_FL_PUNCH_HOLE) {
2733 struct address_space *mapping = file->f_mapping;
2734 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2735 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2736 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2737
2738 /* protected by i_mutex */
2739 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2740 error = -EPERM;
2741 goto out;
2742 }
2743
2744 shmem_falloc.waitq = &shmem_falloc_waitq;
2745 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2746 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2747 spin_lock(&inode->i_lock);
2748 inode->i_private = &shmem_falloc;
2749 spin_unlock(&inode->i_lock);
2750
2751 if ((u64)unmap_end > (u64)unmap_start)
2752 unmap_mapping_range(mapping, unmap_start,
2753 1 + unmap_end - unmap_start, 0);
2754 shmem_truncate_range(inode, offset, offset + len - 1);
2755 /* No need to unmap again: hole-punching leaves COWed pages */
2756
2757 spin_lock(&inode->i_lock);
2758 inode->i_private = NULL;
2759 wake_up_all(&shmem_falloc_waitq);
2760 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2761 spin_unlock(&inode->i_lock);
2762 error = 0;
2763 goto out;
2764 }
2765
2766 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2767 error = inode_newsize_ok(inode, offset + len);
2768 if (error)
2769 goto out;
2770
2771 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2772 error = -EPERM;
2773 goto out;
2774 }
2775
2776 start = offset >> PAGE_SHIFT;
2777 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2778 /* Try to avoid a swapstorm if len is impossible to satisfy */
2779 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2780 error = -ENOSPC;
2781 goto out;
2782 }
2783
2784 shmem_falloc.waitq = NULL;
2785 shmem_falloc.start = start;
2786 shmem_falloc.next = start;
2787 shmem_falloc.nr_falloced = 0;
2788 shmem_falloc.nr_unswapped = 0;
2789 spin_lock(&inode->i_lock);
2790 inode->i_private = &shmem_falloc;
2791 spin_unlock(&inode->i_lock);
2792
2793 for (index = start; index < end; index++) {
2794 struct page *page;
2795
2796 /*
2797 * Good, the fallocate(2) manpage permits EINTR: we may have
2798 * been interrupted because we are using up too much memory.
2799 */
2800 if (signal_pending(current))
2801 error = -EINTR;
2802 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2803 error = -ENOMEM;
2804 else
2805 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2806 if (error) {
2807 /* Remove the !PageUptodate pages we added */
2808 if (index > start) {
2809 shmem_undo_range(inode,
2810 (loff_t)start << PAGE_SHIFT,
2811 ((loff_t)index << PAGE_SHIFT) - 1, true);
2812 }
2813 goto undone;
2814 }
2815
2816 /*
2817 * Inform shmem_writepage() how far we have reached.
2818 * No need for lock or barrier: we have the page lock.
2819 */
2820 shmem_falloc.next++;
2821 if (!PageUptodate(page))
2822 shmem_falloc.nr_falloced++;
2823
2824 /*
2825 * If !PageUptodate, leave it that way so that freeable pages
2826 * can be recognized if we need to rollback on error later.
2827 * But set_page_dirty so that memory pressure will swap rather
2828 * than free the pages we are allocating (and SGP_CACHE pages
2829 * might still be clean: we now need to mark those dirty too).
2830 */
2831 set_page_dirty(page);
2832 unlock_page(page);
2833 put_page(page);
2834 cond_resched();
2835 }
2836
2837 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2838 i_size_write(inode, offset + len);
2839 inode->i_ctime = current_time(inode);
2840undone:
2841 spin_lock(&inode->i_lock);
2842 inode->i_private = NULL;
2843 spin_unlock(&inode->i_lock);
2844out:
2845 inode_unlock(inode);
2846 return error;
2847}
2848
2849static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2850{
2851 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2852
2853 buf->f_type = TMPFS_MAGIC;
2854 buf->f_bsize = PAGE_SIZE;
2855 buf->f_namelen = NAME_MAX;
2856 if (sbinfo->max_blocks) {
2857 buf->f_blocks = sbinfo->max_blocks;
2858 buf->f_bavail =
2859 buf->f_bfree = sbinfo->max_blocks -
2860 percpu_counter_sum(&sbinfo->used_blocks);
2861 }
2862 if (sbinfo->max_inodes) {
2863 buf->f_files = sbinfo->max_inodes;
2864 buf->f_ffree = sbinfo->free_inodes;
2865 }
2866 /* else leave those fields 0 like simple_statfs */
2867 return 0;
2868}
2869
2870/*
2871 * File creation. Allocate an inode, and we're done..
2872 */
2873static int
2874shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2875{
2876 struct inode *inode;
2877 int error = -ENOSPC;
2878
2879 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2880 if (inode) {
2881 error = simple_acl_create(dir, inode);
2882 if (error)
2883 goto out_iput;
2884 error = security_inode_init_security(inode, dir,
2885 &dentry->d_name,
2886 shmem_initxattrs, NULL);
2887 if (error && error != -EOPNOTSUPP)
2888 goto out_iput;
2889
2890 error = 0;
2891 dir->i_size += BOGO_DIRENT_SIZE;
2892 dir->i_ctime = dir->i_mtime = current_time(dir);
2893 d_instantiate(dentry, inode);
2894 dget(dentry); /* Extra count - pin the dentry in core */
2895 }
2896 return error;
2897out_iput:
2898 iput(inode);
2899 return error;
2900}
2901
2902static int
2903shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2904{
2905 struct inode *inode;
2906 int error = -ENOSPC;
2907
2908 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2909 if (inode) {
2910 error = security_inode_init_security(inode, dir,
2911 NULL,
2912 shmem_initxattrs, NULL);
2913 if (error && error != -EOPNOTSUPP)
2914 goto out_iput;
2915 error = simple_acl_create(dir, inode);
2916 if (error)
2917 goto out_iput;
2918 d_tmpfile(dentry, inode);
2919 }
2920 return error;
2921out_iput:
2922 iput(inode);
2923 return error;
2924}
2925
2926static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2927{
2928 int error;
2929
2930 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2931 return error;
2932 inc_nlink(dir);
2933 return 0;
2934}
2935
2936static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2937 bool excl)
2938{
2939 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2940}
2941
2942/*
2943 * Link a file..
2944 */
2945static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2946{
2947 struct inode *inode = d_inode(old_dentry);
2948 int ret = 0;
2949
2950 /*
2951 * No ordinary (disk based) filesystem counts links as inodes;
2952 * but each new link needs a new dentry, pinning lowmem, and
2953 * tmpfs dentries cannot be pruned until they are unlinked.
2954 * But if an O_TMPFILE file is linked into the tmpfs, the
2955 * first link must skip that, to get the accounting right.
2956 */
2957 if (inode->i_nlink) {
2958 ret = shmem_reserve_inode(inode->i_sb);
2959 if (ret)
2960 goto out;
2961 }
2962
2963 dir->i_size += BOGO_DIRENT_SIZE;
2964 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2965 inc_nlink(inode);
2966 ihold(inode); /* New dentry reference */
2967 dget(dentry); /* Extra pinning count for the created dentry */
2968 d_instantiate(dentry, inode);
2969out:
2970 return ret;
2971}
2972
2973static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2974{
2975 struct inode *inode = d_inode(dentry);
2976
2977 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2978 shmem_free_inode(inode->i_sb);
2979
2980 dir->i_size -= BOGO_DIRENT_SIZE;
2981 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2982 drop_nlink(inode);
2983 dput(dentry); /* Undo the count from "create" - this does all the work */
2984 return 0;
2985}
2986
2987static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2988{
2989 if (!simple_empty(dentry))
2990 return -ENOTEMPTY;
2991
2992 drop_nlink(d_inode(dentry));
2993 drop_nlink(dir);
2994 return shmem_unlink(dir, dentry);
2995}
2996
2997static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2998{
2999 bool old_is_dir = d_is_dir(old_dentry);
3000 bool new_is_dir = d_is_dir(new_dentry);
3001
3002 if (old_dir != new_dir && old_is_dir != new_is_dir) {
3003 if (old_is_dir) {
3004 drop_nlink(old_dir);
3005 inc_nlink(new_dir);
3006 } else {
3007 drop_nlink(new_dir);
3008 inc_nlink(old_dir);
3009 }
3010 }
3011 old_dir->i_ctime = old_dir->i_mtime =
3012 new_dir->i_ctime = new_dir->i_mtime =
3013 d_inode(old_dentry)->i_ctime =
3014 d_inode(new_dentry)->i_ctime = current_time(old_dir);
3015
3016 return 0;
3017}
3018
3019static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3020{
3021 struct dentry *whiteout;
3022 int error;
3023
3024 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3025 if (!whiteout)
3026 return -ENOMEM;
3027
3028 error = shmem_mknod(old_dir, whiteout,
3029 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3030 dput(whiteout);
3031 if (error)
3032 return error;
3033
3034 /*
3035 * Cheat and hash the whiteout while the old dentry is still in
3036 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3037 *
3038 * d_lookup() will consistently find one of them at this point,
3039 * not sure which one, but that isn't even important.
3040 */
3041 d_rehash(whiteout);
3042 return 0;
3043}
3044
3045/*
3046 * The VFS layer already does all the dentry stuff for rename,
3047 * we just have to decrement the usage count for the target if
3048 * it exists so that the VFS layer correctly free's it when it
3049 * gets overwritten.
3050 */
3051static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3052{
3053 struct inode *inode = d_inode(old_dentry);
3054 int they_are_dirs = S_ISDIR(inode->i_mode);
3055
3056 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3057 return -EINVAL;
3058
3059 if (flags & RENAME_EXCHANGE)
3060 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3061
3062 if (!simple_empty(new_dentry))
3063 return -ENOTEMPTY;
3064
3065 if (flags & RENAME_WHITEOUT) {
3066 int error;
3067
3068 error = shmem_whiteout(old_dir, old_dentry);
3069 if (error)
3070 return error;
3071 }
3072
3073 if (d_really_is_positive(new_dentry)) {
3074 (void) shmem_unlink(new_dir, new_dentry);
3075 if (they_are_dirs) {
3076 drop_nlink(d_inode(new_dentry));
3077 drop_nlink(old_dir);
3078 }
3079 } else if (they_are_dirs) {
3080 drop_nlink(old_dir);
3081 inc_nlink(new_dir);
3082 }
3083
3084 old_dir->i_size -= BOGO_DIRENT_SIZE;
3085 new_dir->i_size += BOGO_DIRENT_SIZE;
3086 old_dir->i_ctime = old_dir->i_mtime =
3087 new_dir->i_ctime = new_dir->i_mtime =
3088 inode->i_ctime = current_time(old_dir);
3089 return 0;
3090}
3091
3092static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3093{
3094 int error;
3095 int len;
3096 struct inode *inode;
3097 struct page *page;
3098
3099 len = strlen(symname) + 1;
3100 if (len > PAGE_SIZE)
3101 return -ENAMETOOLONG;
3102
3103 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3104 VM_NORESERVE);
3105 if (!inode)
3106 return -ENOSPC;
3107
3108 error = security_inode_init_security(inode, dir, &dentry->d_name,
3109 shmem_initxattrs, NULL);
3110 if (error) {
3111 if (error != -EOPNOTSUPP) {
3112 iput(inode);
3113 return error;
3114 }
3115 error = 0;
3116 }
3117
3118 inode->i_size = len-1;
3119 if (len <= SHORT_SYMLINK_LEN) {
3120 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3121 if (!inode->i_link) {
3122 iput(inode);
3123 return -ENOMEM;
3124 }
3125 inode->i_op = &shmem_short_symlink_operations;
3126 } else {
3127 inode_nohighmem(inode);
3128 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3129 if (error) {
3130 iput(inode);
3131 return error;
3132 }
3133 inode->i_mapping->a_ops = &shmem_aops;
3134 inode->i_op = &shmem_symlink_inode_operations;
3135 memcpy(page_address(page), symname, len);
3136 SetPageUptodate(page);
3137 set_page_dirty(page);
3138 unlock_page(page);
3139 put_page(page);
3140 }
3141 dir->i_size += BOGO_DIRENT_SIZE;
3142 dir->i_ctime = dir->i_mtime = current_time(dir);
3143 d_instantiate(dentry, inode);
3144 dget(dentry);
3145 return 0;
3146}
3147
3148static void shmem_put_link(void *arg)
3149{
3150 mark_page_accessed(arg);
3151 put_page(arg);
3152}
3153
3154static const char *shmem_get_link(struct dentry *dentry,
3155 struct inode *inode,
3156 struct delayed_call *done)
3157{
3158 struct page *page = NULL;
3159 int error;
3160 if (!dentry) {
3161 page = find_get_page(inode->i_mapping, 0);
3162 if (!page)
3163 return ERR_PTR(-ECHILD);
3164 if (!PageUptodate(page)) {
3165 put_page(page);
3166 return ERR_PTR(-ECHILD);
3167 }
3168 } else {
3169 error = shmem_getpage(inode, 0, &page, SGP_READ);
3170 if (error)
3171 return ERR_PTR(error);
3172 unlock_page(page);
3173 }
3174 set_delayed_call(done, shmem_put_link, page);
3175 return page_address(page);
3176}
3177
3178#ifdef CONFIG_TMPFS_XATTR
3179/*
3180 * Superblocks without xattr inode operations may get some security.* xattr
3181 * support from the LSM "for free". As soon as we have any other xattrs
3182 * like ACLs, we also need to implement the security.* handlers at
3183 * filesystem level, though.
3184 */
3185
3186/*
3187 * Callback for security_inode_init_security() for acquiring xattrs.
3188 */
3189static int shmem_initxattrs(struct inode *inode,
3190 const struct xattr *xattr_array,
3191 void *fs_info)
3192{
3193 struct shmem_inode_info *info = SHMEM_I(inode);
3194 const struct xattr *xattr;
3195 struct simple_xattr *new_xattr;
3196 size_t len;
3197
3198 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3199 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3200 if (!new_xattr)
3201 return -ENOMEM;
3202
3203 len = strlen(xattr->name) + 1;
3204 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3205 GFP_KERNEL);
3206 if (!new_xattr->name) {
3207 kfree(new_xattr);
3208 return -ENOMEM;
3209 }
3210
3211 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3212 XATTR_SECURITY_PREFIX_LEN);
3213 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3214 xattr->name, len);
3215
3216 simple_xattr_list_add(&info->xattrs, new_xattr);
3217 }
3218
3219 return 0;
3220}
3221
3222static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3223 struct dentry *unused, struct inode *inode,
3224 const char *name, void *buffer, size_t size)
3225{
3226 struct shmem_inode_info *info = SHMEM_I(inode);
3227
3228 name = xattr_full_name(handler, name);
3229 return simple_xattr_get(&info->xattrs, name, buffer, size);
3230}
3231
3232static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3233 struct dentry *unused, struct inode *inode,
3234 const char *name, const void *value,
3235 size_t size, int flags)
3236{
3237 struct shmem_inode_info *info = SHMEM_I(inode);
3238
3239 name = xattr_full_name(handler, name);
3240 return simple_xattr_set(&info->xattrs, name, value, size, flags);
3241}
3242
3243static const struct xattr_handler shmem_security_xattr_handler = {
3244 .prefix = XATTR_SECURITY_PREFIX,
3245 .get = shmem_xattr_handler_get,
3246 .set = shmem_xattr_handler_set,
3247};
3248
3249static const struct xattr_handler shmem_trusted_xattr_handler = {
3250 .prefix = XATTR_TRUSTED_PREFIX,
3251 .get = shmem_xattr_handler_get,
3252 .set = shmem_xattr_handler_set,
3253};
3254
3255static const struct xattr_handler *shmem_xattr_handlers[] = {
3256#ifdef CONFIG_TMPFS_POSIX_ACL
3257 &posix_acl_access_xattr_handler,
3258 &posix_acl_default_xattr_handler,
3259#endif
3260 &shmem_security_xattr_handler,
3261 &shmem_trusted_xattr_handler,
3262 NULL
3263};
3264
3265static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3266{
3267 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3268 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3269}
3270#endif /* CONFIG_TMPFS_XATTR */
3271
3272static const struct inode_operations shmem_short_symlink_operations = {
3273 .get_link = simple_get_link,
3274#ifdef CONFIG_TMPFS_XATTR
3275 .listxattr = shmem_listxattr,
3276#endif
3277};
3278
3279static const struct inode_operations shmem_symlink_inode_operations = {
3280 .get_link = shmem_get_link,
3281#ifdef CONFIG_TMPFS_XATTR
3282 .listxattr = shmem_listxattr,
3283#endif
3284};
3285
3286static struct dentry *shmem_get_parent(struct dentry *child)
3287{
3288 return ERR_PTR(-ESTALE);
3289}
3290
3291static int shmem_match(struct inode *ino, void *vfh)
3292{
3293 __u32 *fh = vfh;
3294 __u64 inum = fh[2];
3295 inum = (inum << 32) | fh[1];
3296 return ino->i_ino == inum && fh[0] == ino->i_generation;
3297}
3298
3299/* Find any alias of inode, but prefer a hashed alias */
3300static struct dentry *shmem_find_alias(struct inode *inode)
3301{
3302 struct dentry *alias = d_find_alias(inode);
3303
3304 return alias ?: d_find_any_alias(inode);
3305}
3306
3307
3308static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3309 struct fid *fid, int fh_len, int fh_type)
3310{
3311 struct inode *inode;
3312 struct dentry *dentry = NULL;
3313 u64 inum;
3314
3315 if (fh_len < 3)
3316 return NULL;
3317
3318 inum = fid->raw[2];
3319 inum = (inum << 32) | fid->raw[1];
3320
3321 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3322 shmem_match, fid->raw);
3323 if (inode) {
3324 dentry = shmem_find_alias(inode);
3325 iput(inode);
3326 }
3327
3328 return dentry;
3329}
3330
3331static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3332 struct inode *parent)
3333{
3334 if (*len < 3) {
3335 *len = 3;
3336 return FILEID_INVALID;
3337 }
3338
3339 if (inode_unhashed(inode)) {
3340 /* Unfortunately insert_inode_hash is not idempotent,
3341 * so as we hash inodes here rather than at creation
3342 * time, we need a lock to ensure we only try
3343 * to do it once
3344 */
3345 static DEFINE_SPINLOCK(lock);
3346 spin_lock(&lock);
3347 if (inode_unhashed(inode))
3348 __insert_inode_hash(inode,
3349 inode->i_ino + inode->i_generation);
3350 spin_unlock(&lock);
3351 }
3352
3353 fh[0] = inode->i_generation;
3354 fh[1] = inode->i_ino;
3355 fh[2] = ((__u64)inode->i_ino) >> 32;
3356
3357 *len = 3;
3358 return 1;
3359}
3360
3361static const struct export_operations shmem_export_ops = {
3362 .get_parent = shmem_get_parent,
3363 .encode_fh = shmem_encode_fh,
3364 .fh_to_dentry = shmem_fh_to_dentry,
3365};
3366
3367enum shmem_param {
3368 Opt_gid,
3369 Opt_huge,
3370 Opt_mode,
3371 Opt_mpol,
3372 Opt_nr_blocks,
3373 Opt_nr_inodes,
3374 Opt_size,
3375 Opt_uid,
3376};
3377
3378static const struct fs_parameter_spec shmem_param_specs[] = {
3379 fsparam_u32 ("gid", Opt_gid),
3380 fsparam_enum ("huge", Opt_huge),
3381 fsparam_u32oct("mode", Opt_mode),
3382 fsparam_string("mpol", Opt_mpol),
3383 fsparam_string("nr_blocks", Opt_nr_blocks),
3384 fsparam_string("nr_inodes", Opt_nr_inodes),
3385 fsparam_string("size", Opt_size),
3386 fsparam_u32 ("uid", Opt_uid),
3387 {}
3388};
3389
3390static const struct fs_parameter_enum shmem_param_enums[] = {
3391 { Opt_huge, "never", SHMEM_HUGE_NEVER },
3392 { Opt_huge, "always", SHMEM_HUGE_ALWAYS },
3393 { Opt_huge, "within_size", SHMEM_HUGE_WITHIN_SIZE },
3394 { Opt_huge, "advise", SHMEM_HUGE_ADVISE },
3395 {}
3396};
3397
3398const struct fs_parameter_description shmem_fs_parameters = {
3399 .name = "tmpfs",
3400 .specs = shmem_param_specs,
3401 .enums = shmem_param_enums,
3402};
3403
3404static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3405{
3406 struct shmem_options *ctx = fc->fs_private;
3407 struct fs_parse_result result;
3408 unsigned long long size;
3409 char *rest;
3410 int opt;
3411
3412 opt = fs_parse(fc, &shmem_fs_parameters, param, &result);
3413 if (opt < 0)
3414 return opt;
3415
3416 switch (opt) {
3417 case Opt_size:
3418 size = memparse(param->string, &rest);
3419 if (*rest == '%') {
3420 size <<= PAGE_SHIFT;
3421 size *= totalram_pages();
3422 do_div(size, 100);
3423 rest++;
3424 }
3425 if (*rest)
3426 goto bad_value;
3427 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3428 ctx->seen |= SHMEM_SEEN_BLOCKS;
3429 break;
3430 case Opt_nr_blocks:
3431 ctx->blocks = memparse(param->string, &rest);
3432 if (*rest)
3433 goto bad_value;
3434 ctx->seen |= SHMEM_SEEN_BLOCKS;
3435 break;
3436 case Opt_nr_inodes:
3437 ctx->inodes = memparse(param->string, &rest);
3438 if (*rest)
3439 goto bad_value;
3440 ctx->seen |= SHMEM_SEEN_INODES;
3441 break;
3442 case Opt_mode:
3443 ctx->mode = result.uint_32 & 07777;
3444 break;
3445 case Opt_uid:
3446 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3447 if (!uid_valid(ctx->uid))
3448 goto bad_value;
3449 break;
3450 case Opt_gid:
3451 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3452 if (!gid_valid(ctx->gid))
3453 goto bad_value;
3454 break;
3455 case Opt_huge:
3456 ctx->huge = result.uint_32;
3457 if (ctx->huge != SHMEM_HUGE_NEVER &&
3458 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
3459 has_transparent_hugepage()))
3460 goto unsupported_parameter;
3461 ctx->seen |= SHMEM_SEEN_HUGE;
3462 break;
3463 case Opt_mpol:
3464 if (IS_ENABLED(CONFIG_NUMA)) {
3465 mpol_put(ctx->mpol);
3466 ctx->mpol = NULL;
3467 if (mpol_parse_str(param->string, &ctx->mpol))
3468 goto bad_value;
3469 break;
3470 }
3471 goto unsupported_parameter;
3472 }
3473 return 0;
3474
3475unsupported_parameter:
3476 return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
3477bad_value:
3478 return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
3479}
3480
3481static int shmem_parse_options(struct fs_context *fc, void *data)
3482{
3483 char *options = data;
3484
3485 if (options) {
3486 int err = security_sb_eat_lsm_opts(options, &fc->security);
3487 if (err)
3488 return err;
3489 }
3490
3491 while (options != NULL) {
3492 char *this_char = options;
3493 for (;;) {
3494 /*
3495 * NUL-terminate this option: unfortunately,
3496 * mount options form a comma-separated list,
3497 * but mpol's nodelist may also contain commas.
3498 */
3499 options = strchr(options, ',');
3500 if (options == NULL)
3501 break;
3502 options++;
3503 if (!isdigit(*options)) {
3504 options[-1] = '\0';
3505 break;
3506 }
3507 }
3508 if (*this_char) {
3509 char *value = strchr(this_char,'=');
3510 size_t len = 0;
3511 int err;
3512
3513 if (value) {
3514 *value++ = '\0';
3515 len = strlen(value);
3516 }
3517 err = vfs_parse_fs_string(fc, this_char, value, len);
3518 if (err < 0)
3519 return err;
3520 }
3521 }
3522 return 0;
3523}
3524
3525/*
3526 * Reconfigure a shmem filesystem.
3527 *
3528 * Note that we disallow change from limited->unlimited blocks/inodes while any
3529 * are in use; but we must separately disallow unlimited->limited, because in
3530 * that case we have no record of how much is already in use.
3531 */
3532static int shmem_reconfigure(struct fs_context *fc)
3533{
3534 struct shmem_options *ctx = fc->fs_private;
3535 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3536 unsigned long inodes;
3537 const char *err;
3538
3539 spin_lock(&sbinfo->stat_lock);
3540 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3541 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3542 if (!sbinfo->max_blocks) {
3543 err = "Cannot retroactively limit size";
3544 goto out;
3545 }
3546 if (percpu_counter_compare(&sbinfo->used_blocks,
3547 ctx->blocks) > 0) {
3548 err = "Too small a size for current use";
3549 goto out;
3550 }
3551 }
3552 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3553 if (!sbinfo->max_inodes) {
3554 err = "Cannot retroactively limit inodes";
3555 goto out;
3556 }
3557 if (ctx->inodes < inodes) {
3558 err = "Too few inodes for current use";
3559 goto out;
3560 }
3561 }
3562
3563 if (ctx->seen & SHMEM_SEEN_HUGE)
3564 sbinfo->huge = ctx->huge;
3565 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3566 sbinfo->max_blocks = ctx->blocks;
3567 if (ctx->seen & SHMEM_SEEN_INODES) {
3568 sbinfo->max_inodes = ctx->inodes;
3569 sbinfo->free_inodes = ctx->inodes - inodes;
3570 }
3571
3572 /*
3573 * Preserve previous mempolicy unless mpol remount option was specified.
3574 */
3575 if (ctx->mpol) {
3576 mpol_put(sbinfo->mpol);
3577 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
3578 ctx->mpol = NULL;
3579 }
3580 spin_unlock(&sbinfo->stat_lock);
3581 return 0;
3582out:
3583 spin_unlock(&sbinfo->stat_lock);
3584 return invalf(fc, "tmpfs: %s", err);
3585}
3586
3587static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3588{
3589 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3590
3591 if (sbinfo->max_blocks != shmem_default_max_blocks())
3592 seq_printf(seq, ",size=%luk",
3593 sbinfo->max_blocks << (PAGE_SHIFT - 10));
3594 if (sbinfo->max_inodes != shmem_default_max_inodes())
3595 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3596 if (sbinfo->mode != (0777 | S_ISVTX))
3597 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3598 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3599 seq_printf(seq, ",uid=%u",
3600 from_kuid_munged(&init_user_ns, sbinfo->uid));
3601 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3602 seq_printf(seq, ",gid=%u",
3603 from_kgid_munged(&init_user_ns, sbinfo->gid));
3604#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3605 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3606 if (sbinfo->huge)
3607 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3608#endif
3609 shmem_show_mpol(seq, sbinfo->mpol);
3610 return 0;
3611}
3612
3613#endif /* CONFIG_TMPFS */
3614
3615static void shmem_put_super(struct super_block *sb)
3616{
3617 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3618
3619 percpu_counter_destroy(&sbinfo->used_blocks);
3620 mpol_put(sbinfo->mpol);
3621 kfree(sbinfo);
3622 sb->s_fs_info = NULL;
3623}
3624
3625static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3626{
3627 struct shmem_options *ctx = fc->fs_private;
3628 struct inode *inode;
3629 struct shmem_sb_info *sbinfo;
3630 int err = -ENOMEM;
3631
3632 /* Round up to L1_CACHE_BYTES to resist false sharing */
3633 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3634 L1_CACHE_BYTES), GFP_KERNEL);
3635 if (!sbinfo)
3636 return -ENOMEM;
3637
3638 sb->s_fs_info = sbinfo;
3639
3640#ifdef CONFIG_TMPFS
3641 /*
3642 * Per default we only allow half of the physical ram per
3643 * tmpfs instance, limiting inodes to one per page of lowmem;
3644 * but the internal instance is left unlimited.
3645 */
3646 if (!(sb->s_flags & SB_KERNMOUNT)) {
3647 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3648 ctx->blocks = shmem_default_max_blocks();
3649 if (!(ctx->seen & SHMEM_SEEN_INODES))
3650 ctx->inodes = shmem_default_max_inodes();
3651 } else {
3652 sb->s_flags |= SB_NOUSER;
3653 }
3654 sb->s_export_op = &shmem_export_ops;
3655 sb->s_flags |= SB_NOSEC;
3656#else
3657 sb->s_flags |= SB_NOUSER;
3658#endif
3659 sbinfo->max_blocks = ctx->blocks;
3660 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3661 sbinfo->uid = ctx->uid;
3662 sbinfo->gid = ctx->gid;
3663 sbinfo->mode = ctx->mode;
3664 sbinfo->huge = ctx->huge;
3665 sbinfo->mpol = ctx->mpol;
3666 ctx->mpol = NULL;
3667
3668 spin_lock_init(&sbinfo->stat_lock);
3669 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3670 goto failed;
3671 spin_lock_init(&sbinfo->shrinklist_lock);
3672 INIT_LIST_HEAD(&sbinfo->shrinklist);
3673
3674 sb->s_maxbytes = MAX_LFS_FILESIZE;
3675 sb->s_blocksize = PAGE_SIZE;
3676 sb->s_blocksize_bits = PAGE_SHIFT;
3677 sb->s_magic = TMPFS_MAGIC;
3678 sb->s_op = &shmem_ops;
3679 sb->s_time_gran = 1;
3680#ifdef CONFIG_TMPFS_XATTR
3681 sb->s_xattr = shmem_xattr_handlers;
3682#endif
3683#ifdef CONFIG_TMPFS_POSIX_ACL
3684 sb->s_flags |= SB_POSIXACL;
3685#endif
3686 uuid_gen(&sb->s_uuid);
3687
3688 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3689 if (!inode)
3690 goto failed;
3691 inode->i_uid = sbinfo->uid;
3692 inode->i_gid = sbinfo->gid;
3693 sb->s_root = d_make_root(inode);
3694 if (!sb->s_root)
3695 goto failed;
3696 return 0;
3697
3698failed:
3699 shmem_put_super(sb);
3700 return err;
3701}
3702
3703static int shmem_get_tree(struct fs_context *fc)
3704{
3705 return get_tree_nodev(fc, shmem_fill_super);
3706}
3707
3708static void shmem_free_fc(struct fs_context *fc)
3709{
3710 struct shmem_options *ctx = fc->fs_private;
3711
3712 if (ctx) {
3713 mpol_put(ctx->mpol);
3714 kfree(ctx);
3715 }
3716}
3717
3718static const struct fs_context_operations shmem_fs_context_ops = {
3719 .free = shmem_free_fc,
3720 .get_tree = shmem_get_tree,
3721#ifdef CONFIG_TMPFS
3722 .parse_monolithic = shmem_parse_options,
3723 .parse_param = shmem_parse_one,
3724 .reconfigure = shmem_reconfigure,
3725#endif
3726};
3727
3728static struct kmem_cache *shmem_inode_cachep;
3729
3730static struct inode *shmem_alloc_inode(struct super_block *sb)
3731{
3732 struct shmem_inode_info *info;
3733 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3734 if (!info)
3735 return NULL;
3736 return &info->vfs_inode;
3737}
3738
3739static void shmem_free_in_core_inode(struct inode *inode)
3740{
3741 if (S_ISLNK(inode->i_mode))
3742 kfree(inode->i_link);
3743 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3744}
3745
3746static void shmem_destroy_inode(struct inode *inode)
3747{
3748 if (S_ISREG(inode->i_mode))
3749 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3750}
3751
3752static void shmem_init_inode(void *foo)
3753{
3754 struct shmem_inode_info *info = foo;
3755 inode_init_once(&info->vfs_inode);
3756}
3757
3758static void shmem_init_inodecache(void)
3759{
3760 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3761 sizeof(struct shmem_inode_info),
3762 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3763}
3764
3765static void shmem_destroy_inodecache(void)
3766{
3767 kmem_cache_destroy(shmem_inode_cachep);
3768}
3769
3770static const struct address_space_operations shmem_aops = {
3771 .writepage = shmem_writepage,
3772 .set_page_dirty = __set_page_dirty_no_writeback,
3773#ifdef CONFIG_TMPFS
3774 .write_begin = shmem_write_begin,
3775 .write_end = shmem_write_end,
3776#endif
3777#ifdef CONFIG_MIGRATION
3778 .migratepage = migrate_page,
3779#endif
3780 .error_remove_page = generic_error_remove_page,
3781};
3782
3783static const struct file_operations shmem_file_operations = {
3784 .mmap = shmem_mmap,
3785 .get_unmapped_area = shmem_get_unmapped_area,
3786#ifdef CONFIG_TMPFS
3787 .llseek = shmem_file_llseek,
3788 .read_iter = shmem_file_read_iter,
3789 .write_iter = generic_file_write_iter,
3790 .fsync = noop_fsync,
3791 .splice_read = generic_file_splice_read,
3792 .splice_write = iter_file_splice_write,
3793 .fallocate = shmem_fallocate,
3794#endif
3795};
3796
3797static const struct inode_operations shmem_inode_operations = {
3798 .getattr = shmem_getattr,
3799 .setattr = shmem_setattr,
3800#ifdef CONFIG_TMPFS_XATTR
3801 .listxattr = shmem_listxattr,
3802 .set_acl = simple_set_acl,
3803#endif
3804};
3805
3806static const struct inode_operations shmem_dir_inode_operations = {
3807#ifdef CONFIG_TMPFS
3808 .create = shmem_create,
3809 .lookup = simple_lookup,
3810 .link = shmem_link,
3811 .unlink = shmem_unlink,
3812 .symlink = shmem_symlink,
3813 .mkdir = shmem_mkdir,
3814 .rmdir = shmem_rmdir,
3815 .mknod = shmem_mknod,
3816 .rename = shmem_rename2,
3817 .tmpfile = shmem_tmpfile,
3818#endif
3819#ifdef CONFIG_TMPFS_XATTR
3820 .listxattr = shmem_listxattr,
3821#endif
3822#ifdef CONFIG_TMPFS_POSIX_ACL
3823 .setattr = shmem_setattr,
3824 .set_acl = simple_set_acl,
3825#endif
3826};
3827
3828static const struct inode_operations shmem_special_inode_operations = {
3829#ifdef CONFIG_TMPFS_XATTR
3830 .listxattr = shmem_listxattr,
3831#endif
3832#ifdef CONFIG_TMPFS_POSIX_ACL
3833 .setattr = shmem_setattr,
3834 .set_acl = simple_set_acl,
3835#endif
3836};
3837
3838static const struct super_operations shmem_ops = {
3839 .alloc_inode = shmem_alloc_inode,
3840 .free_inode = shmem_free_in_core_inode,
3841 .destroy_inode = shmem_destroy_inode,
3842#ifdef CONFIG_TMPFS
3843 .statfs = shmem_statfs,
3844 .show_options = shmem_show_options,
3845#endif
3846 .evict_inode = shmem_evict_inode,
3847 .drop_inode = generic_delete_inode,
3848 .put_super = shmem_put_super,
3849#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3850 .nr_cached_objects = shmem_unused_huge_count,
3851 .free_cached_objects = shmem_unused_huge_scan,
3852#endif
3853};
3854
3855static const struct vm_operations_struct shmem_vm_ops = {
3856 .fault = shmem_fault,
3857 .map_pages = filemap_map_pages,
3858#ifdef CONFIG_NUMA
3859 .set_policy = shmem_set_policy,
3860 .get_policy = shmem_get_policy,
3861#endif
3862};
3863
3864int shmem_init_fs_context(struct fs_context *fc)
3865{
3866 struct shmem_options *ctx;
3867
3868 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3869 if (!ctx)
3870 return -ENOMEM;
3871
3872 ctx->mode = 0777 | S_ISVTX;
3873 ctx->uid = current_fsuid();
3874 ctx->gid = current_fsgid();
3875
3876 fc->fs_private = ctx;
3877 fc->ops = &shmem_fs_context_ops;
3878 return 0;
3879}
3880
3881static struct file_system_type shmem_fs_type = {
3882 .owner = THIS_MODULE,
3883 .name = "tmpfs",
3884 .init_fs_context = shmem_init_fs_context,
3885#ifdef CONFIG_TMPFS
3886 .parameters = &shmem_fs_parameters,
3887#endif
3888 .kill_sb = kill_litter_super,
3889 .fs_flags = FS_USERNS_MOUNT,
3890};
3891
3892int __init shmem_init(void)
3893{
3894 int error;
3895
3896 shmem_init_inodecache();
3897
3898 error = register_filesystem(&shmem_fs_type);
3899 if (error) {
3900 pr_err("Could not register tmpfs\n");
3901 goto out2;
3902 }
3903
3904 shm_mnt = kern_mount(&shmem_fs_type);
3905 if (IS_ERR(shm_mnt)) {
3906 error = PTR_ERR(shm_mnt);
3907 pr_err("Could not kern_mount tmpfs\n");
3908 goto out1;
3909 }
3910
3911#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3912 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3913 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3914 else
3915 shmem_huge = 0; /* just in case it was patched */
3916#endif
3917 return 0;
3918
3919out1:
3920 unregister_filesystem(&shmem_fs_type);
3921out2:
3922 shmem_destroy_inodecache();
3923 shm_mnt = ERR_PTR(error);
3924 return error;
3925}
3926
3927#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
3928static ssize_t shmem_enabled_show(struct kobject *kobj,
3929 struct kobj_attribute *attr, char *buf)
3930{
3931 int values[] = {
3932 SHMEM_HUGE_ALWAYS,
3933 SHMEM_HUGE_WITHIN_SIZE,
3934 SHMEM_HUGE_ADVISE,
3935 SHMEM_HUGE_NEVER,
3936 SHMEM_HUGE_DENY,
3937 SHMEM_HUGE_FORCE,
3938 };
3939 int i, count;
3940
3941 for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3942 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3943
3944 count += sprintf(buf + count, fmt,
3945 shmem_format_huge(values[i]));
3946 }
3947 buf[count - 1] = '\n';
3948 return count;
3949}
3950
3951static ssize_t shmem_enabled_store(struct kobject *kobj,
3952 struct kobj_attribute *attr, const char *buf, size_t count)
3953{
3954 char tmp[16];
3955 int huge;
3956
3957 if (count + 1 > sizeof(tmp))
3958 return -EINVAL;
3959 memcpy(tmp, buf, count);
3960 tmp[count] = '\0';
3961 if (count && tmp[count - 1] == '\n')
3962 tmp[count - 1] = '\0';
3963
3964 huge = shmem_parse_huge(tmp);
3965 if (huge == -EINVAL)
3966 return -EINVAL;
3967 if (!has_transparent_hugepage() &&
3968 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3969 return -EINVAL;
3970
3971 shmem_huge = huge;
3972 if (shmem_huge > SHMEM_HUGE_DENY)
3973 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3974 return count;
3975}
3976
3977struct kobj_attribute shmem_enabled_attr =
3978 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3979#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
3980
3981#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3982bool shmem_huge_enabled(struct vm_area_struct *vma)
3983{
3984 struct inode *inode = file_inode(vma->vm_file);
3985 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3986 loff_t i_size;
3987 pgoff_t off;
3988
3989 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
3990 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
3991 return false;
3992 if (shmem_huge == SHMEM_HUGE_FORCE)
3993 return true;
3994 if (shmem_huge == SHMEM_HUGE_DENY)
3995 return false;
3996 switch (sbinfo->huge) {
3997 case SHMEM_HUGE_NEVER:
3998 return false;
3999 case SHMEM_HUGE_ALWAYS:
4000 return true;
4001 case SHMEM_HUGE_WITHIN_SIZE:
4002 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4003 i_size = round_up(i_size_read(inode), PAGE_SIZE);
4004 if (i_size >= HPAGE_PMD_SIZE &&
4005 i_size >> PAGE_SHIFT >= off)
4006 return true;
4007 /* fall through */
4008 case SHMEM_HUGE_ADVISE:
4009 /* TODO: implement fadvise() hints */
4010 return (vma->vm_flags & VM_HUGEPAGE);
4011 default:
4012 VM_BUG_ON(1);
4013 return false;
4014 }
4015}
4016#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
4017
4018#else /* !CONFIG_SHMEM */
4019
4020/*
4021 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4022 *
4023 * This is intended for small system where the benefits of the full
4024 * shmem code (swap-backed and resource-limited) are outweighed by
4025 * their complexity. On systems without swap this code should be
4026 * effectively equivalent, but much lighter weight.
4027 */
4028
4029static struct file_system_type shmem_fs_type = {
4030 .name = "tmpfs",
4031 .init_fs_context = ramfs_init_fs_context,
4032 .parameters = &ramfs_fs_parameters,
4033 .kill_sb = kill_litter_super,
4034 .fs_flags = FS_USERNS_MOUNT,
4035};
4036
4037int __init shmem_init(void)
4038{
4039 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4040
4041 shm_mnt = kern_mount(&shmem_fs_type);
4042 BUG_ON(IS_ERR(shm_mnt));
4043
4044 return 0;
4045}
4046
4047int shmem_unuse(unsigned int type, bool frontswap,
4048 unsigned long *fs_pages_to_unuse)
4049{
4050 return 0;
4051}
4052
4053int shmem_lock(struct file *file, int lock, struct user_struct *user)
4054{
4055 return 0;
4056}
4057
4058void shmem_unlock_mapping(struct address_space *mapping)
4059{
4060}
4061
4062#ifdef CONFIG_MMU
4063unsigned long shmem_get_unmapped_area(struct file *file,
4064 unsigned long addr, unsigned long len,
4065 unsigned long pgoff, unsigned long flags)
4066{
4067 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4068}
4069#endif
4070
4071void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4072{
4073 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4074}
4075EXPORT_SYMBOL_GPL(shmem_truncate_range);
4076
4077#define shmem_vm_ops generic_file_vm_ops
4078#define shmem_file_operations ramfs_file_operations
4079#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4080#define shmem_acct_size(flags, size) 0
4081#define shmem_unacct_size(flags, size) do {} while (0)
4082
4083#endif /* CONFIG_SHMEM */
4084
4085/* common code */
4086
4087static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4088 unsigned long flags, unsigned int i_flags)
4089{
4090 struct inode *inode;
4091 struct file *res;
4092
4093 if (IS_ERR(mnt))
4094 return ERR_CAST(mnt);
4095
4096 if (size < 0 || size > MAX_LFS_FILESIZE)
4097 return ERR_PTR(-EINVAL);
4098
4099 if (shmem_acct_size(flags, size))
4100 return ERR_PTR(-ENOMEM);
4101
4102 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4103 flags);
4104 if (unlikely(!inode)) {
4105 shmem_unacct_size(flags, size);
4106 return ERR_PTR(-ENOSPC);
4107 }
4108 inode->i_flags |= i_flags;
4109 inode->i_size = size;
4110 clear_nlink(inode); /* It is unlinked */
4111 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4112 if (!IS_ERR(res))
4113 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4114 &shmem_file_operations);
4115 if (IS_ERR(res))
4116 iput(inode);
4117 return res;
4118}
4119
4120/**
4121 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4122 * kernel internal. There will be NO LSM permission checks against the
4123 * underlying inode. So users of this interface must do LSM checks at a
4124 * higher layer. The users are the big_key and shm implementations. LSM
4125 * checks are provided at the key or shm level rather than the inode.
4126 * @name: name for dentry (to be seen in /proc/<pid>/maps
4127 * @size: size to be set for the file
4128 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4129 */
4130struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4131{
4132 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4133}
4134
4135/**
4136 * shmem_file_setup - get an unlinked file living in tmpfs
4137 * @name: name for dentry (to be seen in /proc/<pid>/maps
4138 * @size: size to be set for the file
4139 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4140 */
4141struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4142{
4143 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4144}
4145EXPORT_SYMBOL_GPL(shmem_file_setup);
4146
4147/**
4148 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4149 * @mnt: the tmpfs mount where the file will be created
4150 * @name: name for dentry (to be seen in /proc/<pid>/maps
4151 * @size: size to be set for the file
4152 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4153 */
4154struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4155 loff_t size, unsigned long flags)
4156{
4157 return __shmem_file_setup(mnt, name, size, flags, 0);
4158}
4159EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4160
4161/**
4162 * shmem_zero_setup - setup a shared anonymous mapping
4163 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
4164 */
4165int shmem_zero_setup(struct vm_area_struct *vma)
4166{
4167 struct file *file;
4168 loff_t size = vma->vm_end - vma->vm_start;
4169
4170 /*
4171 * Cloning a new file under mmap_sem leads to a lock ordering conflict
4172 * between XFS directory reading and selinux: since this file is only
4173 * accessible to the user through its mapping, use S_PRIVATE flag to
4174 * bypass file security, in the same way as shmem_kernel_file_setup().
4175 */
4176 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4177 if (IS_ERR(file))
4178 return PTR_ERR(file);
4179
4180 if (vma->vm_file)
4181 fput(vma->vm_file);
4182 vma->vm_file = file;
4183 vma->vm_ops = &shmem_vm_ops;
4184
4185 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4186 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4187 (vma->vm_end & HPAGE_PMD_MASK)) {
4188 khugepaged_enter(vma, vma->vm_flags);
4189 }
4190
4191 return 0;
4192}
4193
4194/**
4195 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4196 * @mapping: the page's address_space
4197 * @index: the page index
4198 * @gfp: the page allocator flags to use if allocating
4199 *
4200 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4201 * with any new page allocations done using the specified allocation flags.
4202 * But read_cache_page_gfp() uses the ->readpage() method: which does not
4203 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4204 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4205 *
4206 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4207 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4208 */
4209struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4210 pgoff_t index, gfp_t gfp)
4211{
4212#ifdef CONFIG_SHMEM
4213 struct inode *inode = mapping->host;
4214 struct page *page;
4215 int error;
4216
4217 BUG_ON(mapping->a_ops != &shmem_aops);
4218 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4219 gfp, NULL, NULL, NULL);
4220 if (error)
4221 page = ERR_PTR(error);
4222 else
4223 unlock_page(page);
4224 return page;
4225#else
4226 /*
4227 * The tiny !SHMEM case uses ramfs without swap
4228 */
4229 return read_cache_page_gfp(mapping, index, gfp);
4230#endif
4231}
4232EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);