1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/ramfs.h>
29#include <linux/pagemap.h>
30#include <linux/file.h>
31#include <linux/fileattr.h>
32#include <linux/mm.h>
33#include <linux/random.h>
34#include <linux/sched/signal.h>
35#include <linux/export.h>
36#include <linux/shmem_fs.h>
37#include <linux/swap.h>
38#include <linux/uio.h>
39#include <linux/hugetlb.h>
40#include <linux/fs_parser.h>
41#include <linux/swapfile.h>
42#include <linux/iversion.h>
43#include "swap.h"
44
45static struct vfsmount *shm_mnt __ro_after_init;
46
47#ifdef CONFIG_SHMEM
48/*
49 * This virtual memory filesystem is heavily based on the ramfs. It
50 * extends ramfs with the ability to use swap and to honor resource limits,
51 * which makes it a completely usable filesystem.
52 */
53
54#include <linux/xattr.h>
55#include <linux/exportfs.h>
56#include <linux/posix_acl.h>
57#include <linux/posix_acl_xattr.h>
58#include <linux/mman.h>
59#include <linux/string.h>
60#include <linux/slab.h>
61#include <linux/backing-dev.h>
62#include <linux/writeback.h>
63#include <linux/pagevec.h>
64#include <linux/percpu_counter.h>
65#include <linux/falloc.h>
66#include <linux/splice.h>
67#include <linux/security.h>
68#include <linux/swapops.h>
69#include <linux/mempolicy.h>
70#include <linux/namei.h>
71#include <linux/ctype.h>
72#include <linux/migrate.h>
73#include <linux/highmem.h>
74#include <linux/seq_file.h>
75#include <linux/magic.h>
76#include <linux/syscalls.h>
77#include <linux/fcntl.h>
78#include <uapi/linux/memfd.h>
79#include <linux/rmap.h>
80#include <linux/uuid.h>
81#include <linux/quotaops.h>
82#include <linux/rcupdate_wait.h>
83
84#include <linux/uaccess.h>
85
86#include "internal.h"
87
88#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
89#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
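/*
 * For a rough sense of scale, assuming the common 4KiB PAGE_SIZE:
 * BLOCKS_PER_PAGE is then 8 (512-byte blocks per page), and VM_ACCT()
 * rounds a byte count up to whole pages, so VM_ACCT(1) == 1 and
 * VM_ACCT(2 * PAGE_SIZE + 1) == 3.
 */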
90
91/* Pretend that each entry is of this size in directory's i_size */
92#define BOGO_DIRENT_SIZE 20
93
94/* Pretend that one inode + its dentry occupy this much memory */
95#define BOGO_INODE_SIZE 1024
96
97/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98#define SHORT_SYMLINK_LEN 128
99
100/*
101 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
102 * inode->i_private (with i_rwsem making sure that it has only one user at
103 * a time): we would prefer not to enlarge the shmem inode just for that.
104 */
105struct shmem_falloc {
106 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
107 pgoff_t start; /* start of range currently being fallocated */
108 pgoff_t next; /* the next page offset to be fallocated */
109 pgoff_t nr_falloced; /* how many new pages have been fallocated */
110 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
111};
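/*
 * In outline: shmem_fallocate() publishes this struct via inode->i_private
 * while it runs; shmem_writepage() (below) bumps nr_unswapped when reclaim
 * pushes a not-yet-uptodate folio in [start, next) towards swap; and a hole
 * punch installs waitq so that racing faults into the hole can wait for the
 * punch to finish.
 */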
112
113struct shmem_options {
114 unsigned long long blocks;
115 unsigned long long inodes;
116 struct mempolicy *mpol;
117 kuid_t uid;
118 kgid_t gid;
119 umode_t mode;
120 bool full_inums;
121 int huge;
122 int seen;
123 bool noswap;
124 unsigned short quota_types;
125 struct shmem_quota_limits qlimits;
126#define SHMEM_SEEN_BLOCKS 1
127#define SHMEM_SEEN_INODES 2
128#define SHMEM_SEEN_HUGE 4
129#define SHMEM_SEEN_INUMS 8
130#define SHMEM_SEEN_NOSWAP 16
131#define SHMEM_SEEN_QUOTA 32
132};
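/*
 * As an illustration, a mount such as
 *	mount -t tmpfs -o size=1G,nr_inodes=10240,huge=within_size,inode64 tmpfs /mnt
 * fills in blocks, inodes, huge and full_inums here, and sets the matching
 * SHMEM_SEEN_* bits so that a later remount can tell which options were
 * given explicitly.
 */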
133
134#ifdef CONFIG_TMPFS
135static unsigned long shmem_default_max_blocks(void)
136{
137 return totalram_pages() / 2;
138}
139
140static unsigned long shmem_default_max_inodes(void)
141{
142 unsigned long nr_pages = totalram_pages();
143
144 return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
145 ULONG_MAX / BOGO_INODE_SIZE);
146}
147#endif
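/*
 * For example, on a 64-bit machine with 4GiB of RAM (no highmem, roughly
 * 1048576 4KiB pages), these defaults work out to about 524288 blocks
 * (half of RAM) and about 524288 inodes.
 */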
148
149static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
150 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
151 struct mm_struct *fault_mm, vm_fault_t *fault_type);
152
153static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
154{
155 return sb->s_fs_info;
156}
157
158/*
159 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
160 * for shared memory and for shared anonymous (/dev/zero) mappings
161 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
162 * consistent with the pre-accounting of private mappings ...
163 */
164static inline int shmem_acct_size(unsigned long flags, loff_t size)
165{
166 return (flags & VM_NORESERVE) ?
167 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
168}
169
170static inline void shmem_unacct_size(unsigned long flags, loff_t size)
171{
172 if (!(flags & VM_NORESERVE))
173 vm_unacct_memory(VM_ACCT(size));
174}
175
176static inline int shmem_reacct_size(unsigned long flags,
177 loff_t oldsize, loff_t newsize)
178{
179 if (!(flags & VM_NORESERVE)) {
180 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
181 return security_vm_enough_memory_mm(current->mm,
182 VM_ACCT(newsize) - VM_ACCT(oldsize));
183 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
184 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
185 }
186 return 0;
187}
188
189/*
190 * ... whereas tmpfs objects are accounted incrementally as
191 * pages are allocated, in order to allow large sparse files.
192 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
193 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
194 */
195static inline int shmem_acct_blocks(unsigned long flags, long pages)
196{
197 if (!(flags & VM_NORESERVE))
198 return 0;
199
200 return security_vm_enough_memory_mm(current->mm,
201 pages * VM_ACCT(PAGE_SIZE));
202}
203
204static inline void shmem_unacct_blocks(unsigned long flags, long pages)
205{
206 if (flags & VM_NORESERVE)
207 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
208}
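/*
 * Roughly: a shared memory object set up without VM_NORESERVE (e.g. a
 * 5000-byte SysV segment) is charged VM_ACCT(5000) == 2 pages up front by
 * shmem_acct_size(), whereas a sparse tmpfs file (VM_NORESERVE) is charged
 * here, block by block, only as pages are actually allocated (example
 * assumes 4KiB pages).
 */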
209
210static int shmem_inode_acct_blocks(struct inode *inode, long pages)
211{
212 struct shmem_inode_info *info = SHMEM_I(inode);
213 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
214 int err = -ENOSPC;
215
216 if (shmem_acct_blocks(info->flags, pages))
217 return err;
218
219 might_sleep(); /* when quotas */
220 if (sbinfo->max_blocks) {
221 if (!percpu_counter_limited_add(&sbinfo->used_blocks,
222 sbinfo->max_blocks, pages))
223 goto unacct;
224
225 err = dquot_alloc_block_nodirty(inode, pages);
226 if (err) {
227 percpu_counter_sub(&sbinfo->used_blocks, pages);
228 goto unacct;
229 }
230 } else {
231 err = dquot_alloc_block_nodirty(inode, pages);
232 if (err)
233 goto unacct;
234 }
235
236 return 0;
237
238unacct:
239 shmem_unacct_blocks(info->flags, pages);
240 return err;
241}
242
243static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
244{
245 struct shmem_inode_info *info = SHMEM_I(inode);
246 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
247
248 might_sleep(); /* when quotas */
249 dquot_free_block_nodirty(inode, pages);
250
251 if (sbinfo->max_blocks)
252 percpu_counter_sub(&sbinfo->used_blocks, pages);
253 shmem_unacct_blocks(info->flags, pages);
254}
255
256static const struct super_operations shmem_ops;
257const struct address_space_operations shmem_aops;
258static const struct file_operations shmem_file_operations;
259static const struct inode_operations shmem_inode_operations;
260static const struct inode_operations shmem_dir_inode_operations;
261static const struct inode_operations shmem_special_inode_operations;
262static const struct vm_operations_struct shmem_vm_ops;
263static const struct vm_operations_struct shmem_anon_vm_ops;
264static struct file_system_type shmem_fs_type;
265
266bool vma_is_anon_shmem(struct vm_area_struct *vma)
267{
268 return vma->vm_ops == &shmem_anon_vm_ops;
269}
270
271bool vma_is_shmem(struct vm_area_struct *vma)
272{
273 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
274}
275
276static LIST_HEAD(shmem_swaplist);
277static DEFINE_MUTEX(shmem_swaplist_mutex);
278
279#ifdef CONFIG_TMPFS_QUOTA
280
281static int shmem_enable_quotas(struct super_block *sb,
282 unsigned short quota_types)
283{
284 int type, err = 0;
285
286 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
287 for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
288 if (!(quota_types & (1 << type)))
289 continue;
290 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
291 DQUOT_USAGE_ENABLED |
292 DQUOT_LIMITS_ENABLED);
293 if (err)
294 goto out_err;
295 }
296 return 0;
297
298out_err:
299 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
300 type, err);
301 for (type--; type >= 0; type--)
302 dquot_quota_off(sb, type);
303 return err;
304}
305
306static void shmem_disable_quotas(struct super_block *sb)
307{
308 int type;
309
310 for (type = 0; type < SHMEM_MAXQUOTAS; type++)
311 dquot_quota_off(sb, type);
312}
313
314static struct dquot **shmem_get_dquots(struct inode *inode)
315{
316 return SHMEM_I(inode)->i_dquot;
317}
318#endif /* CONFIG_TMPFS_QUOTA */
319
320/*
321 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
322 * produces a novel ino for the newly allocated inode.
323 *
324 * It may also be called when making a hard link to permit the space needed by
325 * each dentry. However, in that case, no new inode number is needed since that
326 * internally draws from another pool of inode numbers (currently global
327 * get_next_ino()). This case is indicated by passing NULL as inop.
328 */
329#define SHMEM_INO_BATCH 1024
330static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
331{
332 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
333 ino_t ino;
334
335 if (!(sb->s_flags & SB_KERNMOUNT)) {
336 raw_spin_lock(&sbinfo->stat_lock);
337 if (sbinfo->max_inodes) {
338 if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
339 raw_spin_unlock(&sbinfo->stat_lock);
340 return -ENOSPC;
341 }
342 sbinfo->free_ispace -= BOGO_INODE_SIZE;
343 }
344 if (inop) {
345 ino = sbinfo->next_ino++;
346 if (unlikely(is_zero_ino(ino)))
347 ino = sbinfo->next_ino++;
348 if (unlikely(!sbinfo->full_inums &&
349 ino > UINT_MAX)) {
350 /*
351 * Emulate get_next_ino uint wraparound for
352 * compatibility
353 */
354 if (IS_ENABLED(CONFIG_64BIT))
355 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
356 __func__, MINOR(sb->s_dev));
357 sbinfo->next_ino = 1;
358 ino = sbinfo->next_ino++;
359 }
360 *inop = ino;
361 }
362 raw_spin_unlock(&sbinfo->stat_lock);
363 } else if (inop) {
364 /*
365 * __shmem_file_setup, one of our callers, is lock-free: it
366 * doesn't hold stat_lock in shmem_reserve_inode since
367 * max_inodes is always 0, and is called from potentially
368 * unknown contexts. As such, use a per-cpu batched allocator
369 * which doesn't require the per-sb stat_lock unless we are at
370 * the batch boundary.
371 *
372 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
373 * shmem mounts are not exposed to userspace, so we don't need
374 * to worry about things like glibc compatibility.
375 */
376 ino_t *next_ino;
377
378 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
379 ino = *next_ino;
380 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
381 raw_spin_lock(&sbinfo->stat_lock);
382 ino = sbinfo->next_ino;
383 sbinfo->next_ino += SHMEM_INO_BATCH;
384 raw_spin_unlock(&sbinfo->stat_lock);
385 if (unlikely(is_zero_ino(ino)))
386 ino++;
387 }
388 *inop = ino;
389 *next_ino = ++ino;
390 put_cpu();
391 }
392
393 return 0;
394}
395
396static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
397{
398 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
399 if (sbinfo->max_inodes) {
400 raw_spin_lock(&sbinfo->stat_lock);
401 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
402 raw_spin_unlock(&sbinfo->stat_lock);
403 }
404}
405
406/**
407 * shmem_recalc_inode - recalculate the block usage of an inode
408 * @inode: inode to recalc
409 * @alloced: the change in number of pages allocated to inode
410 * @swapped: the change in number of pages swapped from inode
411 *
412 * We have to calculate the free blocks since the mm can drop
413 * undirtied hole pages behind our back.
414 *
415 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
416 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
417 */
418static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
419{
420 struct shmem_inode_info *info = SHMEM_I(inode);
421 long freed;
422
423 spin_lock(&info->lock);
424 info->alloced += alloced;
425 info->swapped += swapped;
426 freed = info->alloced - info->swapped -
427 READ_ONCE(inode->i_mapping->nrpages);
428 /*
429 * Special case: whereas normally shmem_recalc_inode() is called
430 * after i_mapping->nrpages has already been adjusted (up or down),
431 * shmem_writepage() has to raise swapped before nrpages is lowered -
432 * to stop a racing shmem_recalc_inode() from thinking that a page has
433 * been freed. Compensate here, to avoid the need for a followup call.
434 */
435 if (swapped > 0)
436 freed += swapped;
437 if (freed > 0)
438 info->alloced -= freed;
439 spin_unlock(&info->lock);
440
441 /* The quota case may block */
442 if (freed > 0)
443 shmem_inode_unacct_blocks(inode, freed);
444}
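/*
 * Worked example of the calculation above: if info->alloced is 8,
 * info->swapped is 2 and i_mapping->nrpages has dropped to 5, then
 * freed = 8 - 2 - 5 = 1, i.e. one clean hole page was reclaimed behind our
 * back, so alloced is pulled back down and one block is unaccounted.
 */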
445
446bool shmem_charge(struct inode *inode, long pages)
447{
448 struct address_space *mapping = inode->i_mapping;
449
450 if (shmem_inode_acct_blocks(inode, pages))
451 return false;
452
453 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
454 xa_lock_irq(&mapping->i_pages);
455 mapping->nrpages += pages;
456 xa_unlock_irq(&mapping->i_pages);
457
458 shmem_recalc_inode(inode, pages, 0);
459 return true;
460}
461
462void shmem_uncharge(struct inode *inode, long pages)
463{
464 /* pages argument is currently unused: keep it to help debugging */
465 /* nrpages adjustment done by __filemap_remove_folio() or caller */
466
467 shmem_recalc_inode(inode, 0, 0);
468}
469
470/*
471 * Replace item expected in xarray by a new item, while holding xa_lock.
472 */
473static int shmem_replace_entry(struct address_space *mapping,
474 pgoff_t index, void *expected, void *replacement)
475{
476 XA_STATE(xas, &mapping->i_pages, index);
477 void *item;
478
479 VM_BUG_ON(!expected);
480 VM_BUG_ON(!replacement);
481 item = xas_load(&xas);
482 if (item != expected)
483 return -ENOENT;
484 xas_store(&xas, replacement);
485 return 0;
486}
487
488/*
489 * Sometimes, before we decide whether to proceed or to fail, we must check
490 * that an entry was not already brought back from swap by a racing thread.
491 *
492 * Checking page is not enough: by the time a SwapCache page is locked, it
493 * might be reused, and again be SwapCache, using the same swap as before.
494 */
495static bool shmem_confirm_swap(struct address_space *mapping,
496 pgoff_t index, swp_entry_t swap)
497{
498 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
499}
500
501/*
502 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
503 *
504 * SHMEM_HUGE_NEVER:
505 * disables huge pages for the mount;
506 * SHMEM_HUGE_ALWAYS:
507 * enables huge pages for the mount;
508 * SHMEM_HUGE_WITHIN_SIZE:
509 * only allocate huge pages if the page will be fully within i_size,
510 * also respect fadvise()/madvise() hints;
511 * SHMEM_HUGE_ADVISE:
512 * only allocate huge pages if requested with fadvise()/madvise();
513 */
514
515#define SHMEM_HUGE_NEVER 0
516#define SHMEM_HUGE_ALWAYS 1
517#define SHMEM_HUGE_WITHIN_SIZE 2
518#define SHMEM_HUGE_ADVISE 3
519
520/*
521 * Special values.
522 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
523 *
524 * SHMEM_HUGE_DENY:
525 * disables huge on shm_mnt and all mounts, for emergency use;
526 * SHMEM_HUGE_FORCE:
527 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
528 *
529 */
530#define SHMEM_HUGE_DENY (-1)
531#define SHMEM_HUGE_FORCE (-2)
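/*
 * These two can be selected, for example, with
 * "echo force >/sys/kernel/mm/transparent_hugepage/shmem_enabled"
 * (the sysfs handler lives further down this file).
 */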
532
533#ifdef CONFIG_TRANSPARENT_HUGEPAGE
534/* ifdef here to avoid bloating shmem.o when not necessary */
535
536static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
537
538bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
539 struct mm_struct *mm, unsigned long vm_flags)
540{
541 loff_t i_size;
542
543 if (!S_ISREG(inode->i_mode))
544 return false;
545 if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
546 return false;
547 if (shmem_huge == SHMEM_HUGE_DENY)
548 return false;
549 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
550 return true;
551
552 switch (SHMEM_SB(inode->i_sb)->huge) {
553 case SHMEM_HUGE_ALWAYS:
554 return true;
555 case SHMEM_HUGE_WITHIN_SIZE:
556 index = round_up(index + 1, HPAGE_PMD_NR);
557 i_size = round_up(i_size_read(inode), PAGE_SIZE);
558 if (i_size >> PAGE_SHIFT >= index)
559 return true;
560 fallthrough;
561 case SHMEM_HUGE_ADVISE:
562 if (mm && (vm_flags & VM_HUGEPAGE))
563 return true;
564 fallthrough;
565 default:
566 return false;
567 }
568}
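/*
 * Worked example of the within_size case, assuming 4KiB pages (so
 * HPAGE_PMD_NR is 512): a fault at index 100 rounds index up to 512, so a
 * huge page is used only when i_size (rounded up to a page) covers at least
 * 512 pages, i.e. the whole first 2MiB extent.
 */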
569
570#if defined(CONFIG_SYSFS)
571static int shmem_parse_huge(const char *str)
572{
573 if (!strcmp(str, "never"))
574 return SHMEM_HUGE_NEVER;
575 if (!strcmp(str, "always"))
576 return SHMEM_HUGE_ALWAYS;
577 if (!strcmp(str, "within_size"))
578 return SHMEM_HUGE_WITHIN_SIZE;
579 if (!strcmp(str, "advise"))
580 return SHMEM_HUGE_ADVISE;
581 if (!strcmp(str, "deny"))
582 return SHMEM_HUGE_DENY;
583 if (!strcmp(str, "force"))
584 return SHMEM_HUGE_FORCE;
585 return -EINVAL;
586}
587#endif
588
589#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
590static const char *shmem_format_huge(int huge)
591{
592 switch (huge) {
593 case SHMEM_HUGE_NEVER:
594 return "never";
595 case SHMEM_HUGE_ALWAYS:
596 return "always";
597 case SHMEM_HUGE_WITHIN_SIZE:
598 return "within_size";
599 case SHMEM_HUGE_ADVISE:
600 return "advise";
601 case SHMEM_HUGE_DENY:
602 return "deny";
603 case SHMEM_HUGE_FORCE:
604 return "force";
605 default:
606 VM_BUG_ON(1);
607 return "bad_val";
608 }
609}
610#endif
611
612static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
613 struct shrink_control *sc, unsigned long nr_to_split)
614{
615 LIST_HEAD(list), *pos, *next;
616 LIST_HEAD(to_remove);
617 struct inode *inode;
618 struct shmem_inode_info *info;
619 struct folio *folio;
620 unsigned long batch = sc ? sc->nr_to_scan : 128;
621 int split = 0;
622
623 if (list_empty(&sbinfo->shrinklist))
624 return SHRINK_STOP;
625
626 spin_lock(&sbinfo->shrinklist_lock);
627 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
628 info = list_entry(pos, struct shmem_inode_info, shrinklist);
629
630 /* pin the inode */
631 inode = igrab(&info->vfs_inode);
632
633 /* inode is about to be evicted */
634 if (!inode) {
635 list_del_init(&info->shrinklist);
636 goto next;
637 }
638
639 /* Check if there's anything to gain */
640 if (round_up(inode->i_size, PAGE_SIZE) ==
641 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
642 list_move(&info->shrinklist, &to_remove);
643 goto next;
644 }
645
646 list_move(&info->shrinklist, &list);
647next:
648 sbinfo->shrinklist_len--;
649 if (!--batch)
650 break;
651 }
652 spin_unlock(&sbinfo->shrinklist_lock);
653
654 list_for_each_safe(pos, next, &to_remove) {
655 info = list_entry(pos, struct shmem_inode_info, shrinklist);
656 inode = &info->vfs_inode;
657 list_del_init(&info->shrinklist);
658 iput(inode);
659 }
660
661 list_for_each_safe(pos, next, &list) {
662 int ret;
663 pgoff_t index;
664
665 info = list_entry(pos, struct shmem_inode_info, shrinklist);
666 inode = &info->vfs_inode;
667
668 if (nr_to_split && split >= nr_to_split)
669 goto move_back;
670
671 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
672 folio = filemap_get_folio(inode->i_mapping, index);
673 if (IS_ERR(folio))
674 goto drop;
675
676 /* No huge page at the end of the file: nothing to split */
677 if (!folio_test_large(folio)) {
678 folio_put(folio);
679 goto drop;
680 }
681
682 /*
683 * Move the inode on the list back to shrinklist if we failed
684 * to lock the page at this time.
685 *
686 * Waiting for the lock may lead to deadlock in the
687 * reclaim path.
688 */
689 if (!folio_trylock(folio)) {
690 folio_put(folio);
691 goto move_back;
692 }
693
694 ret = split_folio(folio);
695 folio_unlock(folio);
696 folio_put(folio);
697
698 /* If split failed move the inode on the list back to shrinklist */
699 if (ret)
700 goto move_back;
701
702 split++;
703drop:
704 list_del_init(&info->shrinklist);
705 goto put;
706move_back:
707 /*
708 * Make sure the inode is either on the global list or deleted
709 * from any local list before iput() since it could be deleted
710 * in another thread once we put the inode (then the local list
711 * is corrupted).
712 */
713 spin_lock(&sbinfo->shrinklist_lock);
714 list_move(&info->shrinklist, &sbinfo->shrinklist);
715 sbinfo->shrinklist_len++;
716 spin_unlock(&sbinfo->shrinklist_lock);
717put:
718 iput(inode);
719 }
720
721 return split;
722}
723
724static long shmem_unused_huge_scan(struct super_block *sb,
725 struct shrink_control *sc)
726{
727 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
728
729 if (!READ_ONCE(sbinfo->shrinklist_len))
730 return SHRINK_STOP;
731
732 return shmem_unused_huge_shrink(sbinfo, sc, 0);
733}
734
735static long shmem_unused_huge_count(struct super_block *sb,
736 struct shrink_control *sc)
737{
738 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
739 return READ_ONCE(sbinfo->shrinklist_len);
740}
741#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
742
743#define shmem_huge SHMEM_HUGE_DENY
744
745bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
746 struct mm_struct *mm, unsigned long vm_flags)
747{
748 return false;
749}
750
751static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
752 struct shrink_control *sc, unsigned long nr_to_split)
753{
754 return 0;
755}
756#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
757
758/*
759 * Somewhat like filemap_add_folio, but error if expected item has gone.
760 */
761static int shmem_add_to_page_cache(struct folio *folio,
762 struct address_space *mapping,
763 pgoff_t index, void *expected, gfp_t gfp)
764{
765 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
766 long nr = folio_nr_pages(folio);
767
768 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
769 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
770 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
771 VM_BUG_ON(expected && folio_test_large(folio));
772
773 folio_ref_add(folio, nr);
774 folio->mapping = mapping;
775 folio->index = index;
776
777 gfp &= GFP_RECLAIM_MASK;
778 folio_throttle_swaprate(folio, gfp);
779
780 do {
781 xas_lock_irq(&xas);
782 if (expected != xas_find_conflict(&xas)) {
783 xas_set_err(&xas, -EEXIST);
784 goto unlock;
785 }
786 if (expected && xas_find_conflict(&xas)) {
787 xas_set_err(&xas, -EEXIST);
788 goto unlock;
789 }
790 xas_store(&xas, folio);
791 if (xas_error(&xas))
792 goto unlock;
793 if (folio_test_pmd_mappable(folio))
794 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
795 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
796 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
797 mapping->nrpages += nr;
798unlock:
799 xas_unlock_irq(&xas);
800 } while (xas_nomem(&xas, gfp));
801
802 if (xas_error(&xas)) {
803 folio->mapping = NULL;
804 folio_ref_sub(folio, nr);
805 return xas_error(&xas);
806 }
807
808 return 0;
809}
810
811/*
812 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
813 */
814static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
815{
816 struct address_space *mapping = folio->mapping;
817 long nr = folio_nr_pages(folio);
818 int error;
819
820 xa_lock_irq(&mapping->i_pages);
821 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
822 folio->mapping = NULL;
823 mapping->nrpages -= nr;
824 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
825 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
826 xa_unlock_irq(&mapping->i_pages);
827 folio_put(folio);
828 BUG_ON(error);
829}
830
831/*
832 * Remove swap entry from page cache, free the swap and its page cache.
833 */
834static int shmem_free_swap(struct address_space *mapping,
835 pgoff_t index, void *radswap)
836{
837 void *old;
838
839 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
840 if (old != radswap)
841 return -ENOENT;
842 free_swap_and_cache(radix_to_swp_entry(radswap));
843 return 0;
844}
845
846/*
847 * Determine (in bytes) how many of the shmem object's pages mapped by the
848 * given offsets are swapped out.
849 *
850 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
851 * as long as the inode doesn't go away and racy results are not a problem.
852 */
853unsigned long shmem_partial_swap_usage(struct address_space *mapping,
854 pgoff_t start, pgoff_t end)
855{
856 XA_STATE(xas, &mapping->i_pages, start);
857 struct page *page;
858 unsigned long swapped = 0;
859 unsigned long max = end - 1;
860
861 rcu_read_lock();
862 xas_for_each(&xas, page, max) {
863 if (xas_retry(&xas, page))
864 continue;
865 if (xa_is_value(page))
866 swapped++;
867 if (xas.xa_index == max)
868 break;
869 if (need_resched()) {
870 xas_pause(&xas);
871 cond_resched_rcu();
872 }
873 }
874 rcu_read_unlock();
875
876 return swapped << PAGE_SHIFT;
877}
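/*
 * e.g. three swap entries found in [start, end) are reported as
 * 3 << PAGE_SHIFT == 12288 bytes swapped out, assuming 4KiB pages.
 */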
878
879/*
880 * Determine (in bytes) how many of the shmem object's pages mapped by the
881 * given vma are swapped out.
882 *
883 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
884 * as long as the inode doesn't go away and racy results are not a problem.
885 */
886unsigned long shmem_swap_usage(struct vm_area_struct *vma)
887{
888 struct inode *inode = file_inode(vma->vm_file);
889 struct shmem_inode_info *info = SHMEM_I(inode);
890 struct address_space *mapping = inode->i_mapping;
891 unsigned long swapped;
892
893 /* Be careful as we don't hold info->lock */
894 swapped = READ_ONCE(info->swapped);
895
896 /*
897 * The easier cases are when the shmem object has nothing in swap, or
898 * the vma maps it whole. Then we can simply use the stats that we
899 * already track.
900 */
901 if (!swapped)
902 return 0;
903
904 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
905 return swapped << PAGE_SHIFT;
906
907 /* Here comes the more involved part */
908 return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
909 vma->vm_pgoff + vma_pages(vma));
910}
911
912/*
913 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
914 */
915void shmem_unlock_mapping(struct address_space *mapping)
916{
917 struct folio_batch fbatch;
918 pgoff_t index = 0;
919
920 folio_batch_init(&fbatch);
921 /*
922 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
923 */
924 while (!mapping_unevictable(mapping) &&
925 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
926 check_move_unevictable_folios(&fbatch);
927 folio_batch_release(&fbatch);
928 cond_resched();
929 }
930}
931
932static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
933{
934 struct folio *folio;
935
936 /*
937 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
938 * beyond i_size, and reports fallocated folios as holes.
939 */
940 folio = filemap_get_entry(inode->i_mapping, index);
941 if (!folio)
942 return folio;
943 if (!xa_is_value(folio)) {
944 folio_lock(folio);
945 if (folio->mapping == inode->i_mapping)
946 return folio;
947 /* The folio has been swapped out */
948 folio_unlock(folio);
949 folio_put(folio);
950 }
951 /*
952 * But read a folio back from swap if any of it is within i_size
953 * (although in some cases this is just a waste of time).
954 */
955 folio = NULL;
956 shmem_get_folio(inode, index, &folio, SGP_READ);
957 return folio;
958}
959
960/*
961 * Remove range of pages and swap entries from page cache, and free them.
962 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
963 */
964static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
965 bool unfalloc)
966{
967 struct address_space *mapping = inode->i_mapping;
968 struct shmem_inode_info *info = SHMEM_I(inode);
969 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
970 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
971 struct folio_batch fbatch;
972 pgoff_t indices[PAGEVEC_SIZE];
973 struct folio *folio;
974 bool same_folio;
975 long nr_swaps_freed = 0;
976 pgoff_t index;
977 int i;
978
979 if (lend == -1)
980 end = -1; /* unsigned, so actually very big */
981
982 if (info->fallocend > start && info->fallocend <= end && !unfalloc)
983 info->fallocend = start;
984
985 folio_batch_init(&fbatch);
986 index = start;
987 while (index < end && find_lock_entries(mapping, &index, end - 1,
988 &fbatch, indices)) {
989 for (i = 0; i < folio_batch_count(&fbatch); i++) {
990 folio = fbatch.folios[i];
991
992 if (xa_is_value(folio)) {
993 if (unfalloc)
994 continue;
995 nr_swaps_freed += !shmem_free_swap(mapping,
996 indices[i], folio);
997 continue;
998 }
999
1000 if (!unfalloc || !folio_test_uptodate(folio))
1001 truncate_inode_folio(mapping, folio);
1002 folio_unlock(folio);
1003 }
1004 folio_batch_remove_exceptionals(&fbatch);
1005 folio_batch_release(&fbatch);
1006 cond_resched();
1007 }
1008
1009 /*
1010 * When undoing a failed fallocate, we want none of the partial folio
1011 * zeroing and splitting below, but shall want to truncate the whole
1012 * folio when !uptodate indicates that it was added by this fallocate,
1013 * even when [lstart, lend] covers only a part of the folio.
1014 */
1015 if (unfalloc)
1016 goto whole_folios;
1017
1018 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1019 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1020 if (folio) {
1021 same_folio = lend < folio_pos(folio) + folio_size(folio);
1022 folio_mark_dirty(folio);
1023 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1024 start = folio_next_index(folio);
1025 if (same_folio)
1026 end = folio->index;
1027 }
1028 folio_unlock(folio);
1029 folio_put(folio);
1030 folio = NULL;
1031 }
1032
1033 if (!same_folio)
1034 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1035 if (folio) {
1036 folio_mark_dirty(folio);
1037 if (!truncate_inode_partial_folio(folio, lstart, lend))
1038 end = folio->index;
1039 folio_unlock(folio);
1040 folio_put(folio);
1041 }
1042
1043whole_folios:
1044
1045 index = start;
1046 while (index < end) {
1047 cond_resched();
1048
1049 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1050 indices)) {
1051 /* If all gone or hole-punch or unfalloc, we're done */
1052 if (index == start || end != -1)
1053 break;
1054 /* But if truncating, restart to make sure all gone */
1055 index = start;
1056 continue;
1057 }
1058 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1059 folio = fbatch.folios[i];
1060
1061 if (xa_is_value(folio)) {
1062 if (unfalloc)
1063 continue;
1064 if (shmem_free_swap(mapping, indices[i], folio)) {
1065 /* Swap was replaced by page: retry */
1066 index = indices[i];
1067 break;
1068 }
1069 nr_swaps_freed++;
1070 continue;
1071 }
1072
1073 folio_lock(folio);
1074
1075 if (!unfalloc || !folio_test_uptodate(folio)) {
1076 if (folio_mapping(folio) != mapping) {
1077 /* Page was replaced by swap: retry */
1078 folio_unlock(folio);
1079 index = indices[i];
1080 break;
1081 }
1082 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1083 folio);
1084
1085 if (!folio_test_large(folio)) {
1086 truncate_inode_folio(mapping, folio);
1087 } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1088 /*
1089 * If we split a page, reset the loop so
1090 * that we pick up the new sub pages.
1091 * Otherwise the THP was entirely
1092 * dropped or the target range was
1093 * zeroed, so just continue the loop as
1094 * is.
1095 */
1096 if (!folio_test_large(folio)) {
1097 folio_unlock(folio);
1098 index = start;
1099 break;
1100 }
1101 }
1102 }
1103 folio_unlock(folio);
1104 }
1105 folio_batch_remove_exceptionals(&fbatch);
1106 folio_batch_release(&fbatch);
1107 }
1108
1109 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1110}
1111
1112void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1113{
1114 shmem_undo_range(inode, lstart, lend, false);
1115 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1116 inode_inc_iversion(inode);
1117}
1118EXPORT_SYMBOL_GPL(shmem_truncate_range);
1119
1120static int shmem_getattr(struct mnt_idmap *idmap,
1121 const struct path *path, struct kstat *stat,
1122 u32 request_mask, unsigned int query_flags)
1123{
1124 struct inode *inode = path->dentry->d_inode;
1125 struct shmem_inode_info *info = SHMEM_I(inode);
1126
1127 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1128 shmem_recalc_inode(inode, 0, 0);
1129
1130 if (info->fsflags & FS_APPEND_FL)
1131 stat->attributes |= STATX_ATTR_APPEND;
1132 if (info->fsflags & FS_IMMUTABLE_FL)
1133 stat->attributes |= STATX_ATTR_IMMUTABLE;
1134 if (info->fsflags & FS_NODUMP_FL)
1135 stat->attributes |= STATX_ATTR_NODUMP;
1136 stat->attributes_mask |= (STATX_ATTR_APPEND |
1137 STATX_ATTR_IMMUTABLE |
1138 STATX_ATTR_NODUMP);
1139 generic_fillattr(idmap, request_mask, inode, stat);
1140
1141 if (shmem_is_huge(inode, 0, false, NULL, 0))
1142 stat->blksize = HPAGE_PMD_SIZE;
1143
1144 if (request_mask & STATX_BTIME) {
1145 stat->result_mask |= STATX_BTIME;
1146 stat->btime.tv_sec = info->i_crtime.tv_sec;
1147 stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1148 }
1149
1150 return 0;
1151}
1152
1153static int shmem_setattr(struct mnt_idmap *idmap,
1154 struct dentry *dentry, struct iattr *attr)
1155{
1156 struct inode *inode = d_inode(dentry);
1157 struct shmem_inode_info *info = SHMEM_I(inode);
1158 int error;
1159 bool update_mtime = false;
1160 bool update_ctime = true;
1161
1162 error = setattr_prepare(idmap, dentry, attr);
1163 if (error)
1164 return error;
1165
1166 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1167 if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1168 return -EPERM;
1169 }
1170 }
1171
1172 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1173 loff_t oldsize = inode->i_size;
1174 loff_t newsize = attr->ia_size;
1175
1176 /* protected by i_rwsem */
1177 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1178 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1179 return -EPERM;
1180
1181 if (newsize != oldsize) {
1182 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1183 oldsize, newsize);
1184 if (error)
1185 return error;
1186 i_size_write(inode, newsize);
1187 update_mtime = true;
1188 } else {
1189 update_ctime = false;
1190 }
1191 if (newsize <= oldsize) {
1192 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1193 if (oldsize > holebegin)
1194 unmap_mapping_range(inode->i_mapping,
1195 holebegin, 0, 1);
1196 if (info->alloced)
1197 shmem_truncate_range(inode,
1198 newsize, (loff_t)-1);
1199 /* unmap again to remove racily COWed private pages */
1200 if (oldsize > holebegin)
1201 unmap_mapping_range(inode->i_mapping,
1202 holebegin, 0, 1);
1203 }
1204 }
1205
1206 if (is_quota_modification(idmap, inode, attr)) {
1207 error = dquot_initialize(inode);
1208 if (error)
1209 return error;
1210 }
1211
1212 /* Transfer quota accounting */
1213 if (i_uid_needs_update(idmap, attr, inode) ||
1214 i_gid_needs_update(idmap, attr, inode)) {
1215 error = dquot_transfer(idmap, inode, attr);
1216 if (error)
1217 return error;
1218 }
1219
1220 setattr_copy(idmap, inode, attr);
1221 if (attr->ia_valid & ATTR_MODE)
1222 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1223 if (!error && update_ctime) {
1224 inode_set_ctime_current(inode);
1225 if (update_mtime)
1226 inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1227 inode_inc_iversion(inode);
1228 }
1229 return error;
1230}
1231
1232static void shmem_evict_inode(struct inode *inode)
1233{
1234 struct shmem_inode_info *info = SHMEM_I(inode);
1235 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1236 size_t freed = 0;
1237
1238 if (shmem_mapping(inode->i_mapping)) {
1239 shmem_unacct_size(info->flags, inode->i_size);
1240 inode->i_size = 0;
1241 mapping_set_exiting(inode->i_mapping);
1242 shmem_truncate_range(inode, 0, (loff_t)-1);
1243 if (!list_empty(&info->shrinklist)) {
1244 spin_lock(&sbinfo->shrinklist_lock);
1245 if (!list_empty(&info->shrinklist)) {
1246 list_del_init(&info->shrinklist);
1247 sbinfo->shrinklist_len--;
1248 }
1249 spin_unlock(&sbinfo->shrinklist_lock);
1250 }
1251 while (!list_empty(&info->swaplist)) {
1252 /* Wait while shmem_unuse() is scanning this inode... */
1253 wait_var_event(&info->stop_eviction,
1254 !atomic_read(&info->stop_eviction));
1255 mutex_lock(&shmem_swaplist_mutex);
1256 /* ...but beware of the race if we peeked too early */
1257 if (!atomic_read(&info->stop_eviction))
1258 list_del_init(&info->swaplist);
1259 mutex_unlock(&shmem_swaplist_mutex);
1260 }
1261 }
1262
1263 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1264 shmem_free_inode(inode->i_sb, freed);
1265 WARN_ON(inode->i_blocks);
1266 clear_inode(inode);
1267#ifdef CONFIG_TMPFS_QUOTA
1268 dquot_free_inode(inode);
1269 dquot_drop(inode);
1270#endif
1271}
1272
1273static int shmem_find_swap_entries(struct address_space *mapping,
1274 pgoff_t start, struct folio_batch *fbatch,
1275 pgoff_t *indices, unsigned int type)
1276{
1277 XA_STATE(xas, &mapping->i_pages, start);
1278 struct folio *folio;
1279 swp_entry_t entry;
1280
1281 rcu_read_lock();
1282 xas_for_each(&xas, folio, ULONG_MAX) {
1283 if (xas_retry(&xas, folio))
1284 continue;
1285
1286 if (!xa_is_value(folio))
1287 continue;
1288
1289 entry = radix_to_swp_entry(folio);
1290 /*
1291 * swapin error entries can be found in the mapping. But they're
1292 * deliberately ignored here as we've done everything we can do.
1293 */
1294 if (swp_type(entry) != type)
1295 continue;
1296
1297 indices[folio_batch_count(fbatch)] = xas.xa_index;
1298 if (!folio_batch_add(fbatch, folio))
1299 break;
1300
1301 if (need_resched()) {
1302 xas_pause(&xas);
1303 cond_resched_rcu();
1304 }
1305 }
1306 rcu_read_unlock();
1307
1308 return xas.xa_index;
1309}
1310
1311/*
1312 * Move the swapped pages for an inode to page cache. Returns the count
1313 * of pages swapped in, or the error in case of failure.
1314 */
1315static int shmem_unuse_swap_entries(struct inode *inode,
1316 struct folio_batch *fbatch, pgoff_t *indices)
1317{
1318 int i = 0;
1319 int ret = 0;
1320 int error = 0;
1321 struct address_space *mapping = inode->i_mapping;
1322
1323 for (i = 0; i < folio_batch_count(fbatch); i++) {
1324 struct folio *folio = fbatch->folios[i];
1325
1326 if (!xa_is_value(folio))
1327 continue;
1328 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1329 mapping_gfp_mask(mapping), NULL, NULL);
1330 if (error == 0) {
1331 folio_unlock(folio);
1332 folio_put(folio);
1333 ret++;
1334 }
1335 if (error == -ENOMEM)
1336 break;
1337 error = 0;
1338 }
1339 return error ? error : ret;
1340}
1341
1342/*
1343 * If swap found in inode, free it and move page from swapcache to filecache.
1344 */
1345static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1346{
1347 struct address_space *mapping = inode->i_mapping;
1348 pgoff_t start = 0;
1349 struct folio_batch fbatch;
1350 pgoff_t indices[PAGEVEC_SIZE];
1351 int ret = 0;
1352
1353 do {
1354 folio_batch_init(&fbatch);
1355 shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1356 if (folio_batch_count(&fbatch) == 0) {
1357 ret = 0;
1358 break;
1359 }
1360
1361 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1362 if (ret < 0)
1363 break;
1364
1365 start = indices[folio_batch_count(&fbatch) - 1];
1366 } while (true);
1367
1368 return ret;
1369}
1370
1371/*
1372 * Read all the shared memory data that resides in the swap
1373 * device 'type' back into memory, so the swap device can be
1374 * unused.
1375 */
1376int shmem_unuse(unsigned int type)
1377{
1378 struct shmem_inode_info *info, *next;
1379 int error = 0;
1380
1381 if (list_empty(&shmem_swaplist))
1382 return 0;
1383
1384 mutex_lock(&shmem_swaplist_mutex);
1385 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1386 if (!info->swapped) {
1387 list_del_init(&info->swaplist);
1388 continue;
1389 }
1390 /*
1391 * Drop the swaplist mutex while searching the inode for swap;
1392 * but before doing so, make sure shmem_evict_inode() will not
1393 * remove placeholder inode from swaplist, nor let it be freed
1394 * (igrab() would protect from unlink, but not from unmount).
1395 */
1396 atomic_inc(&info->stop_eviction);
1397 mutex_unlock(&shmem_swaplist_mutex);
1398
1399 error = shmem_unuse_inode(&info->vfs_inode, type);
1400 cond_resched();
1401
1402 mutex_lock(&shmem_swaplist_mutex);
1403 next = list_next_entry(info, swaplist);
1404 if (!info->swapped)
1405 list_del_init(&info->swaplist);
1406 if (atomic_dec_and_test(&info->stop_eviction))
1407 wake_up_var(&info->stop_eviction);
1408 if (error)
1409 break;
1410 }
1411 mutex_unlock(&shmem_swaplist_mutex);
1412
1413 return error;
1414}
1415
1416/*
1417 * Move the page from the page cache to the swap cache.
1418 */
1419static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1420{
1421 struct folio *folio = page_folio(page);
1422 struct address_space *mapping = folio->mapping;
1423 struct inode *inode = mapping->host;
1424 struct shmem_inode_info *info = SHMEM_I(inode);
1425 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1426 swp_entry_t swap;
1427 pgoff_t index;
1428
1429 /*
1430 * Our capabilities prevent regular writeback or sync from ever calling
1431 * shmem_writepage; but a stacking filesystem might use ->writepage of
1432 * its underlying filesystem, in which case tmpfs should write out to
1433 * swap only in response to memory pressure, and not for the writeback
1434 * threads or sync.
1435 */
1436 if (WARN_ON_ONCE(!wbc->for_reclaim))
1437 goto redirty;
1438
1439 if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
1440 goto redirty;
1441
1442 if (!total_swap_pages)
1443 goto redirty;
1444
1445 /*
1446 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
1447 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
1448 * and its shmem_writeback() needs them to be split when swapping.
1449 */
1450 if (folio_test_large(folio)) {
1451 /* Ensure the subpages are still dirty */
1452 folio_test_set_dirty(folio);
1453 if (split_huge_page(page) < 0)
1454 goto redirty;
1455 folio = page_folio(page);
1456 folio_clear_dirty(folio);
1457 }
1458
1459 index = folio->index;
1460
1461 /*
1462 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1463 * value into swapfile.c, the only way we can correctly account for a
1464 * fallocated folio arriving here is now to initialize it and write it.
1465 *
1466 * That's okay for a folio already fallocated earlier, but if we have
1467 * not yet completed the fallocation, then (a) we want to keep track
1468 * of this folio in case we have to undo it, and (b) it may not be a
1469 * good idea to continue anyway, once we're pushing into swap. So
1470 * reactivate the folio, and let shmem_fallocate() quit when too many.
1471 */
1472 if (!folio_test_uptodate(folio)) {
1473 if (inode->i_private) {
1474 struct shmem_falloc *shmem_falloc;
1475 spin_lock(&inode->i_lock);
1476 shmem_falloc = inode->i_private;
1477 if (shmem_falloc &&
1478 !shmem_falloc->waitq &&
1479 index >= shmem_falloc->start &&
1480 index < shmem_falloc->next)
1481 shmem_falloc->nr_unswapped++;
1482 else
1483 shmem_falloc = NULL;
1484 spin_unlock(&inode->i_lock);
1485 if (shmem_falloc)
1486 goto redirty;
1487 }
1488 folio_zero_range(folio, 0, folio_size(folio));
1489 flush_dcache_folio(folio);
1490 folio_mark_uptodate(folio);
1491 }
1492
1493 swap = folio_alloc_swap(folio);
1494 if (!swap.val)
1495 goto redirty;
1496
1497 /*
1498 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1499 * if it's not already there. Do it now before the folio is
1500 * moved to swap cache, when its pagelock no longer protects
1501 * the inode from eviction. But don't unlock the mutex until
1502 * we've incremented swapped, because shmem_unuse_inode() will
1503 * prune a !swapped inode from the swaplist under this mutex.
1504 */
1505 mutex_lock(&shmem_swaplist_mutex);
1506 if (list_empty(&info->swaplist))
1507 list_add(&info->swaplist, &shmem_swaplist);
1508
1509 if (add_to_swap_cache(folio, swap,
1510 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1511 NULL) == 0) {
1512 shmem_recalc_inode(inode, 0, 1);
1513 swap_shmem_alloc(swap);
1514 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1515
1516 mutex_unlock(&shmem_swaplist_mutex);
1517 BUG_ON(folio_mapped(folio));
1518 return swap_writepage(&folio->page, wbc);
1519 }
1520
1521 mutex_unlock(&shmem_swaplist_mutex);
1522 put_swap_folio(folio, swap);
1523redirty:
1524 folio_mark_dirty(folio);
1525 if (wbc->for_reclaim)
1526 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1527 folio_unlock(folio);
1528 return 0;
1529}
1530
1531#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1532static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1533{
1534 char buffer[64];
1535
1536 if (!mpol || mpol->mode == MPOL_DEFAULT)
1537 return; /* show nothing */
1538
1539 mpol_to_str(buffer, sizeof(buffer), mpol);
1540
1541 seq_printf(seq, ",mpol=%s", buffer);
1542}
1543
1544static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1545{
1546 struct mempolicy *mpol = NULL;
1547 if (sbinfo->mpol) {
1548 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1549 mpol = sbinfo->mpol;
1550 mpol_get(mpol);
1551 raw_spin_unlock(&sbinfo->stat_lock);
1552 }
1553 return mpol;
1554}
1555#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1556static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1557{
1558}
1559static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1560{
1561 return NULL;
1562}
1563#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1564
1565static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1566 pgoff_t index, unsigned int order, pgoff_t *ilx);
1567
1568static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1569 struct shmem_inode_info *info, pgoff_t index)
1570{
1571 struct mempolicy *mpol;
1572 pgoff_t ilx;
1573 struct folio *folio;
1574
1575 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1576 folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1577 mpol_cond_put(mpol);
1578
1579 return folio;
1580}
1581
1582/*
1583 * Make sure huge_gfp is always more limited than limit_gfp.
1584 * Some of the flags set permissions, while others set limitations.
1585 */
1586static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1587{
1588 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1589 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1590 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1591 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1592
1593 /* Allow allocations only from the originally specified zones. */
1594 result |= zoneflags;
1595
1596 /*
1597 * Minimize the result gfp by taking the union with the deny flags,
1598 * and the intersection of the allow flags.
1599 */
1600 result |= (limit_gfp & denyflags);
1601 result |= (huge_gfp & limit_gfp) & allowflags;
1602
1603 return result;
1604}
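/*
 * For instance, if limit_gfp lacks __GFP_FS (a GFP_NOFS caller), the result
 * lacks __GFP_FS even when huge_gfp had it; and if limit_gfp carries
 * __GFP_NORETRY, that restriction is propagated into the result too.
 */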
1605
1606static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1607 struct shmem_inode_info *info, pgoff_t index)
1608{
1609 struct mempolicy *mpol;
1610 pgoff_t ilx;
1611 struct page *page;
1612
1613 mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
1614 page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
1615 mpol_cond_put(mpol);
1616
1617 return page_rmappable_folio(page);
1618}
1619
1620static struct folio *shmem_alloc_folio(gfp_t gfp,
1621 struct shmem_inode_info *info, pgoff_t index)
1622{
1623 struct mempolicy *mpol;
1624 pgoff_t ilx;
1625 struct page *page;
1626
1627 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1628 page = alloc_pages_mpol(gfp, 0, mpol, ilx, numa_node_id());
1629 mpol_cond_put(mpol);
1630
1631 return (struct folio *)page;
1632}
1633
1634static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
1635 struct inode *inode, pgoff_t index,
1636 struct mm_struct *fault_mm, bool huge)
1637{
1638 struct address_space *mapping = inode->i_mapping;
1639 struct shmem_inode_info *info = SHMEM_I(inode);
1640 struct folio *folio;
1641 long pages;
1642 int error;
1643
1644 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1645 huge = false;
1646
1647 if (huge) {
1648 pages = HPAGE_PMD_NR;
1649 index = round_down(index, HPAGE_PMD_NR);
1650
1651 /*
1652 * Check for conflict before waiting on a huge allocation.
1653 * Conflict might be that a huge page has just been allocated
1654 * and added to page cache by a racing thread, or that there
1655 * is already at least one small page in the huge extent.
1656 * Be careful to retry when appropriate, but not forever!
1657 * Elsewhere -EEXIST would be the right code, but not here.
1658 */
1659 if (xa_find(&mapping->i_pages, &index,
1660 index + HPAGE_PMD_NR - 1, XA_PRESENT))
1661 return ERR_PTR(-E2BIG);
1662
1663 folio = shmem_alloc_hugefolio(gfp, info, index);
1664 if (!folio)
1665 count_vm_event(THP_FILE_FALLBACK);
1666 } else {
1667 pages = 1;
1668 folio = shmem_alloc_folio(gfp, info, index);
1669 }
1670 if (!folio)
1671 return ERR_PTR(-ENOMEM);
1672
1673 __folio_set_locked(folio);
1674 __folio_set_swapbacked(folio);
1675
1676 gfp &= GFP_RECLAIM_MASK;
1677 error = mem_cgroup_charge(folio, fault_mm, gfp);
1678 if (error) {
1679 if (xa_find(&mapping->i_pages, &index,
1680 index + pages - 1, XA_PRESENT)) {
1681 error = -EEXIST;
1682 } else if (huge) {
1683 count_vm_event(THP_FILE_FALLBACK);
1684 count_vm_event(THP_FILE_FALLBACK_CHARGE);
1685 }
1686 goto unlock;
1687 }
1688
1689 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1690 if (error)
1691 goto unlock;
1692
1693 error = shmem_inode_acct_blocks(inode, pages);
1694 if (error) {
1695 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1696 long freed;
1697 /*
1698 * Try to reclaim some space by splitting a few
1699 * large folios beyond i_size on the filesystem.
1700 */
1701 shmem_unused_huge_shrink(sbinfo, NULL, 2);
1702 /*
1703 * And do a shmem_recalc_inode() to account for freed pages:
1704 * except our folio is there in cache, so not quite balanced.
1705 */
1706 spin_lock(&info->lock);
1707 freed = pages + info->alloced - info->swapped -
1708 READ_ONCE(mapping->nrpages);
1709 if (freed > 0)
1710 info->alloced -= freed;
1711 spin_unlock(&info->lock);
1712 if (freed > 0)
1713 shmem_inode_unacct_blocks(inode, freed);
1714 error = shmem_inode_acct_blocks(inode, pages);
1715 if (error) {
1716 filemap_remove_folio(folio);
1717 goto unlock;
1718 }
1719 }
1720
1721 shmem_recalc_inode(inode, pages, 0);
1722 folio_add_lru(folio);
1723 return folio;
1724
1725unlock:
1726 folio_unlock(folio);
1727 folio_put(folio);
1728 return ERR_PTR(error);
1729}
1730
1731/*
1732 * When a page is moved from swapcache to shmem filecache (either by the
1733 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1734 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1735 * ignorance of the mapping it belongs to. If that mapping has special
1736 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1737 * we may need to copy to a suitable page before moving to filecache.
1738 *
1739 * In a future release, this may well be extended to respect cpuset and
1740 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1741 * but for now it is a simple matter of zone.
1742 */
1743static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1744{
1745 return folio_zonenum(folio) > gfp_zone(gfp);
1746}
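/*
 * e.g. if the mapping's gfp mask restricts allocations to ZONE_DMA32 (as in
 * the gma500 case mentioned above) but the swapcache folio happens to sit
 * in ZONE_NORMAL, folio_zonenum() exceeds gfp_zone() and the folio must be
 * copied before entering this mapping's page cache.
 */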
1747
1748static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1749 struct shmem_inode_info *info, pgoff_t index)
1750{
1751 struct folio *old, *new;
1752 struct address_space *swap_mapping;
1753 swp_entry_t entry;
1754 pgoff_t swap_index;
1755 int error;
1756
1757 old = *foliop;
1758 entry = old->swap;
1759 swap_index = swp_offset(entry);
1760 swap_mapping = swap_address_space(entry);
1761
1762 /*
1763 * We have arrived here because our zones are constrained, so don't
1764 * limit chance of success by further cpuset and node constraints.
1765 */
1766 gfp &= ~GFP_CONSTRAINT_MASK;
1767 VM_BUG_ON_FOLIO(folio_test_large(old), old);
1768 new = shmem_alloc_folio(gfp, info, index);
1769 if (!new)
1770 return -ENOMEM;
1771
1772 folio_get(new);
1773 folio_copy(new, old);
1774 flush_dcache_folio(new);
1775
1776 __folio_set_locked(new);
1777 __folio_set_swapbacked(new);
1778 folio_mark_uptodate(new);
1779 new->swap = entry;
1780 folio_set_swapcache(new);
1781
1782 /*
1783 * Our caller will very soon move newpage out of swapcache, but it's
1784 * a nice clean interface for us to replace oldpage by newpage there.
1785 */
1786 xa_lock_irq(&swap_mapping->i_pages);
1787 error = shmem_replace_entry(swap_mapping, swap_index, old, new);
1788 if (!error) {
1789 mem_cgroup_migrate(old, new);
1790 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1791 __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1792 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1793 __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
1794 }
1795 xa_unlock_irq(&swap_mapping->i_pages);
1796
1797 if (unlikely(error)) {
1798 /*
1799 * Is this possible? I think not, now that our callers check
1800 * both PageSwapCache and page_private after getting page lock;
1801 * but be defensive. Reverse old to newpage for clear and free.
1802 */
1803 old = new;
1804 } else {
1805 folio_add_lru(new);
1806 *foliop = new;
1807 }
1808
1809 folio_clear_swapcache(old);
1810 old->private = NULL;
1811
1812 folio_unlock(old);
1813 folio_put_refs(old, 2);
1814 return error;
1815}
1816
1817static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1818 struct folio *folio, swp_entry_t swap)
1819{
1820 struct address_space *mapping = inode->i_mapping;
1821 swp_entry_t swapin_error;
1822 void *old;
1823
1824 swapin_error = make_poisoned_swp_entry();
1825 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1826 swp_to_radix_entry(swap),
1827 swp_to_radix_entry(swapin_error), 0);
1828 if (old != swp_to_radix_entry(swap))
1829 return;
1830
1831 folio_wait_writeback(folio);
1832 delete_from_swap_cache(folio);
1833 /*
1834 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1835 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
1836 * in shmem_evict_inode().
1837 */
1838 shmem_recalc_inode(inode, -1, -1);
1839 swap_free(swap);
1840}
1841
1842/*
1843 * Swap in the folio pointed to by *foliop.
1844 * Caller has to make sure that *foliop contains a valid swapped folio.
1845 * Returns 0 and the folio in foliop on success. On failure, returns the
1846 * error code and NULL in *foliop.
1847 */
1848static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1849 struct folio **foliop, enum sgp_type sgp,
1850 gfp_t gfp, struct mm_struct *fault_mm,
1851 vm_fault_t *fault_type)
1852{
1853 struct address_space *mapping = inode->i_mapping;
1854 struct shmem_inode_info *info = SHMEM_I(inode);
1855 struct swap_info_struct *si;
1856 struct folio *folio = NULL;
1857 swp_entry_t swap;
1858 int error;
1859
1860 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1861 swap = radix_to_swp_entry(*foliop);
1862 *foliop = NULL;
1863
1864 if (is_poisoned_swp_entry(swap))
1865 return -EIO;
1866
1867 si = get_swap_device(swap);
1868 if (!si) {
1869 if (!shmem_confirm_swap(mapping, index, swap))
1870 return -EEXIST;
1871 else
1872 return -EINVAL;
1873 }
1874
1875 /* Look it up and read it in.. */
1876 folio = swap_cache_get_folio(swap, NULL, 0);
1877 if (!folio) {
1878 /* Or update major stats only when swapin succeeds?? */
1879 if (fault_type) {
1880 *fault_type |= VM_FAULT_MAJOR;
1881 count_vm_event(PGMAJFAULT);
1882 count_memcg_event_mm(fault_mm, PGMAJFAULT);
1883 }
1884 /* Here we actually start the io */
1885 folio = shmem_swapin_cluster(swap, gfp, info, index);
1886 if (!folio) {
1887 error = -ENOMEM;
1888 goto failed;
1889 }
1890 }
1891
1892 /* We have to do this with folio locked to prevent races */
1893 folio_lock(folio);
1894 if (!folio_test_swapcache(folio) ||
1895 folio->swap.val != swap.val ||
1896 !shmem_confirm_swap(mapping, index, swap)) {
1897 error = -EEXIST;
1898 goto unlock;
1899 }
1900 if (!folio_test_uptodate(folio)) {
1901 error = -EIO;
1902 goto failed;
1903 }
1904 folio_wait_writeback(folio);
1905
1906 /*
1907 * Some architectures may have to restore extra metadata to the
1908 * folio after reading from swap.
1909 */
1910 arch_swap_restore(swap, folio);
1911
1912 if (shmem_should_replace_folio(folio, gfp)) {
1913 error = shmem_replace_folio(&folio, gfp, info, index);
1914 if (error)
1915 goto failed;
1916 }
1917
1918 error = shmem_add_to_page_cache(folio, mapping, index,
1919 swp_to_radix_entry(swap), gfp);
1920 if (error)
1921 goto failed;
1922
1923 shmem_recalc_inode(inode, 0, -1);
1924
1925 if (sgp == SGP_WRITE)
1926 folio_mark_accessed(folio);
1927
1928 delete_from_swap_cache(folio);
1929 folio_mark_dirty(folio);
1930 swap_free(swap);
1931 put_swap_device(si);
1932
1933 *foliop = folio;
1934 return 0;
1935failed:
1936 if (!shmem_confirm_swap(mapping, index, swap))
1937 error = -EEXIST;
1938 if (error == -EIO)
1939 shmem_set_folio_swapin_error(inode, index, folio, swap);
1940unlock:
1941 if (folio) {
1942 folio_unlock(folio);
1943 folio_put(folio);
1944 }
1945 put_swap_device(si);
1946
1947 return error;
1948}
1949
1950/*
1951 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1952 *
1953 * If we allocate a new one we do not mark it dirty. That's up to the
1954 * vm. If we swap it in we mark it dirty, since we also free the swap
1955 * entry: a page cannot live in both the swap cache and the page cache.
1956 *
1957 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
1958 */
1959static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1960 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1961 struct vm_fault *vmf, vm_fault_t *fault_type)
1962{
1963 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1964 struct mm_struct *fault_mm;
1965 struct folio *folio;
1966 int error;
1967 bool alloced;
1968
1969 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1970 return -EFBIG;
1971repeat:
1972 if (sgp <= SGP_CACHE &&
1973 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
1974 return -EINVAL;
1975
1976 alloced = false;
1977 fault_mm = vma ? vma->vm_mm : NULL;
1978
1979 folio = filemap_get_entry(inode->i_mapping, index);
1980 if (folio && vma && userfaultfd_minor(vma)) {
1981 if (!xa_is_value(folio))
1982 folio_put(folio);
1983 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1984 return 0;
1985 }
1986
1987 if (xa_is_value(folio)) {
1988 error = shmem_swapin_folio(inode, index, &folio,
1989 sgp, gfp, fault_mm, fault_type);
1990 if (error == -EEXIST)
1991 goto repeat;
1992
1993 *foliop = folio;
1994 return error;
1995 }
1996
1997 if (folio) {
1998 folio_lock(folio);
1999
2000 /* Has the folio been truncated or swapped out? */
2001 if (unlikely(folio->mapping != inode->i_mapping)) {
2002 folio_unlock(folio);
2003 folio_put(folio);
2004 goto repeat;
2005 }
2006 if (sgp == SGP_WRITE)
2007 folio_mark_accessed(folio);
2008 if (folio_test_uptodate(folio))
2009 goto out;
2010 /* fallocated folio */
2011 if (sgp != SGP_READ)
2012 goto clear;
2013 folio_unlock(folio);
2014 folio_put(folio);
2015 }
2016
2017 /*
2018 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2019 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2020 */
2021 *foliop = NULL;
2022 if (sgp == SGP_READ)
2023 return 0;
2024 if (sgp == SGP_NOALLOC)
2025 return -ENOENT;
2026
2027 /*
2028 * Fast cache lookup and swap lookup did not find it: allocate.
2029 */
2030
2031 if (vma && userfaultfd_missing(vma)) {
2032 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2033 return 0;
2034 }
2035
2036 if (shmem_is_huge(inode, index, false, fault_mm,
2037 vma ? vma->vm_flags : 0)) {
2038 gfp_t huge_gfp;
2039
2040 huge_gfp = vma_thp_gfp_mask(vma);
2041 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2042 folio = shmem_alloc_and_add_folio(huge_gfp,
2043 inode, index, fault_mm, true);
2044 if (!IS_ERR(folio)) {
2045 count_vm_event(THP_FILE_ALLOC);
2046 goto alloced;
2047 }
2048 if (PTR_ERR(folio) == -EEXIST)
2049 goto repeat;
2050 }
2051
2052 folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
2053 if (IS_ERR(folio)) {
2054 error = PTR_ERR(folio);
2055 if (error == -EEXIST)
2056 goto repeat;
2057 folio = NULL;
2058 goto unlock;
2059 }
2060
2061alloced:
2062 alloced = true;
2063 if (folio_test_pmd_mappable(folio) &&
2064 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2065 folio_next_index(folio) - 1) {
2066 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2067 struct shmem_inode_info *info = SHMEM_I(inode);
2068 /*
2069 * Part of the large folio is beyond i_size: subject
2070 * to shrink under memory pressure.
2071 */
2072 spin_lock(&sbinfo->shrinklist_lock);
2073 /*
2074 * list_empty_careful() to defend against unlocked access to
2075 * info->shrinklist in shmem_unused_huge_shrink()
2076 */
2077 if (list_empty_careful(&info->shrinklist)) {
2078 list_add_tail(&info->shrinklist,
2079 &sbinfo->shrinklist);
2080 sbinfo->shrinklist_len++;
2081 }
2082 spin_unlock(&sbinfo->shrinklist_lock);
2083 }
2084
2085 if (sgp == SGP_WRITE)
2086 folio_set_referenced(folio);
2087 /*
2088 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2089 */
2090 if (sgp == SGP_FALLOC)
2091 sgp = SGP_WRITE;
2092clear:
2093 /*
2094 * Let SGP_WRITE caller clear ends if write does not fill folio;
2095 * but SGP_FALLOC on a folio fallocated earlier must initialize
2096 * it now, lest undo on failure cancel our earlier guarantee.
2097 */
2098 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2099 long i, n = folio_nr_pages(folio);
2100
2101 for (i = 0; i < n; i++)
2102 clear_highpage(folio_page(folio, i));
2103 flush_dcache_folio(folio);
2104 folio_mark_uptodate(folio);
2105 }
2106
2107 /* Perhaps the file has been truncated since we checked */
2108 if (sgp <= SGP_CACHE &&
2109 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2110 error = -EINVAL;
2111 goto unlock;
2112 }
2113out:
2114 *foliop = folio;
2115 return 0;
2116
2117 /*
2118 * Error recovery.
2119 */
2120unlock:
2121 if (alloced)
2122 filemap_remove_folio(folio);
2123 shmem_recalc_inode(inode, 0, 0);
2124 if (folio) {
2125 folio_unlock(folio);
2126 folio_put(folio);
2127 }
2128 return error;
2129}
2130
2131int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2132 enum sgp_type sgp)
2133{
2134 return shmem_get_folio_gfp(inode, index, foliop, sgp,
2135 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2136}
2137
2138/*
2139 * This is like autoremove_wake_function, but it removes the wait queue
2140 * entry unconditionally - even if something else had already woken the
2141 * target.
2142 */
2143static int synchronous_wake_function(wait_queue_entry_t *wait,
2144 unsigned int mode, int sync, void *key)
2145{
2146 int ret = default_wake_function(wait, mode, sync, key);
2147 list_del_init(&wait->entry);
2148 return ret;
2149}
2150
2151/*
2152 * Trinity finds that probing a hole which tmpfs is punching can
2153 * prevent the hole-punch from ever completing: which in turn
2154 * locks writers out with its hold on i_rwsem. So refrain from
2155 * faulting pages into the hole while it's being punched. Although
2156 * shmem_undo_range() does remove the additions, it may be unable to
2157 * keep up, as each new page needs its own unmap_mapping_range() call,
2158 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2159 *
2160 * It does not matter if we sometimes reach this check just before the
2161 * hole-punch begins, so that one fault then races with the punch:
2162 * we just need to make racing faults a rare case.
2163 *
2164 * The implementation below would be much simpler if we just used a
2165 * standard mutex or completion: but we cannot take i_rwsem in fault,
2166 * and bloating every shmem inode for this unlikely case would be sad.
2167 */
2168static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2169{
2170 struct shmem_falloc *shmem_falloc;
2171 struct file *fpin = NULL;
2172 vm_fault_t ret = 0;
2173
2174 spin_lock(&inode->i_lock);
2175 shmem_falloc = inode->i_private;
2176 if (shmem_falloc &&
2177 shmem_falloc->waitq &&
2178 vmf->pgoff >= shmem_falloc->start &&
2179 vmf->pgoff < shmem_falloc->next) {
2180 wait_queue_head_t *shmem_falloc_waitq;
2181 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2182
2183 ret = VM_FAULT_NOPAGE;
2184 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2185 shmem_falloc_waitq = shmem_falloc->waitq;
2186 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2187 TASK_UNINTERRUPTIBLE);
2188 spin_unlock(&inode->i_lock);
2189 schedule();
2190
2191 /*
2192 * shmem_falloc_waitq points into the shmem_fallocate()
2193 * stack of the hole-punching task: shmem_falloc_waitq
2194 * is usually invalid by the time we reach here, but
2195 * finish_wait() does not dereference it in that case;
2196 * though i_lock is still needed lest we race with wake_up_all().
2197 */
2198 spin_lock(&inode->i_lock);
2199 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2200 }
2201 spin_unlock(&inode->i_lock);
2202 if (fpin) {
2203 fput(fpin);
2204 ret = VM_FAULT_RETRY;
2205 }
2206 return ret;
2207}
2208
2209static vm_fault_t shmem_fault(struct vm_fault *vmf)
2210{
2211 struct inode *inode = file_inode(vmf->vma->vm_file);
2212 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2213 struct folio *folio = NULL;
2214 vm_fault_t ret = 0;
2215 int err;
2216
2217 /*
2218 * Trinity finds that probing a hole which tmpfs is punching can
2219 * prevent the hole-punch from ever completing: noted in i_private.
2220 */
2221 if (unlikely(inode->i_private)) {
2222 ret = shmem_falloc_wait(vmf, inode);
2223 if (ret)
2224 return ret;
2225 }
2226
2227 WARN_ON_ONCE(vmf->page != NULL);
2228 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2229 gfp, vmf, &ret);
2230 if (err)
2231 return vmf_error(err);
2232 if (folio) {
2233 vmf->page = folio_file_page(folio, vmf->pgoff);
2234 ret |= VM_FAULT_LOCKED;
2235 }
2236 return ret;
2237}
2238
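/*
 * Choose a suitable mmap address for a tmpfs mapping.  After the normal
 * arch get_unmapped_area() has picked an address, try to shift the range
 * so that it is aligned for PMD-sized huge pages, when transparent huge
 * pages may be used for the mapping; otherwise just return what the arch
 * code chose.
 */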
2239unsigned long shmem_get_unmapped_area(struct file *file,
2240 unsigned long uaddr, unsigned long len,
2241 unsigned long pgoff, unsigned long flags)
2242{
2243 unsigned long (*get_area)(struct file *,
2244 unsigned long, unsigned long, unsigned long, unsigned long);
2245 unsigned long addr;
2246 unsigned long offset;
2247 unsigned long inflated_len;
2248 unsigned long inflated_addr;
2249 unsigned long inflated_offset;
2250
2251 if (len > TASK_SIZE)
2252 return -ENOMEM;
2253
2254 get_area = current->mm->get_unmapped_area;
2255 addr = get_area(file, uaddr, len, pgoff, flags);
2256
2257 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2258 return addr;
2259 if (IS_ERR_VALUE(addr))
2260 return addr;
2261 if (addr & ~PAGE_MASK)
2262 return addr;
2263 if (addr > TASK_SIZE - len)
2264 return addr;
2265
2266 if (shmem_huge == SHMEM_HUGE_DENY)
2267 return addr;
2268 if (len < HPAGE_PMD_SIZE)
2269 return addr;
2270 if (flags & MAP_FIXED)
2271 return addr;
2272 /*
2273 * Our priority is to support MAP_SHARED mapped hugely;
2274 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2275 * But if caller specified an address hint and we allocated area there
2276 * successfully, respect that as before.
2277 */
2278 if (uaddr == addr)
2279 return addr;
2280
2281 if (shmem_huge != SHMEM_HUGE_FORCE) {
2282 struct super_block *sb;
2283
2284 if (file) {
2285 VM_BUG_ON(file->f_op != &shmem_file_operations);
2286 sb = file_inode(file)->i_sb;
2287 } else {
2288 /*
2289 * Called directly from mm/mmap.c, or drivers/char/mem.c
2290 * for "/dev/zero", to create a shared anonymous object.
2291 */
2292 if (IS_ERR(shm_mnt))
2293 return addr;
2294 sb = shm_mnt->mnt_sb;
2295 }
2296 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2297 return addr;
2298 }
2299
2300 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2301 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2302 return addr;
2303 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2304 return addr;
2305
2306 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2307 if (inflated_len > TASK_SIZE)
2308 return addr;
2309 if (inflated_len < len)
2310 return addr;
2311
2312 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2313 if (IS_ERR_VALUE(inflated_addr))
2314 return addr;
2315 if (inflated_addr & ~PAGE_MASK)
2316 return addr;
2317
2318 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2319 inflated_addr += offset - inflated_offset;
2320 if (inflated_offset > offset)
2321 inflated_addr += HPAGE_PMD_SIZE;
2322
2323 if (inflated_addr > TASK_SIZE - len)
2324 return addr;
2325 return inflated_addr;
2326}
2327
2328#ifdef CONFIG_NUMA
2329static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2330{
2331 struct inode *inode = file_inode(vma->vm_file);
2332 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2333}
2334
2335static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2336 unsigned long addr, pgoff_t *ilx)
2337{
2338 struct inode *inode = file_inode(vma->vm_file);
2339 pgoff_t index;
2340
2341 /*
2342 * Bias interleave by inode number to distribute better across nodes;
2343 * but this interface is independent of which page order is used, so
2344 * supplies only that bias, letting caller apply the offset (adjusted
2345 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2346 */
2347 *ilx = inode->i_ino;
2348 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2349 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2350}
2351
2352static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2353 pgoff_t index, unsigned int order, pgoff_t *ilx)
2354{
2355 struct mempolicy *mpol;
2356
2357 /* Bias interleave by inode number to distribute better across nodes */
2358 *ilx = info->vfs_inode.i_ino + (index >> order);
2359
2360 mpol = mpol_shared_policy_lookup(&info->policy, index);
2361 return mpol ? mpol : get_task_policy(current);
2362}
2363#else
2364static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2365 pgoff_t index, unsigned int order, pgoff_t *ilx)
2366{
2367 *ilx = 0;
2368 return NULL;
2369}
2370#endif /* CONFIG_NUMA */
2371
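/*
 * SHM_LOCK/SHM_UNLOCK support for SysV shared memory: mark the whole
 * mapping unevictable (so its pages are not swapped out) or evictable
 * again, charging or uncharging the locked size against the caller's
 * RLIMIT_MEMLOCK ucounts.
 */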
2372int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2373{
2374 struct inode *inode = file_inode(file);
2375 struct shmem_inode_info *info = SHMEM_I(inode);
2376 int retval = -ENOMEM;
2377
2378 /*
2379 * What serializes the accesses to info->flags?
2380 * ipc_lock_object() when called from shmctl_do_lock(),
2381 * no serialization needed when called from shm_destroy().
2382 */
2383 if (lock && !(info->flags & VM_LOCKED)) {
2384 if (!user_shm_lock(inode->i_size, ucounts))
2385 goto out_nomem;
2386 info->flags |= VM_LOCKED;
2387 mapping_set_unevictable(file->f_mapping);
2388 }
2389 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2390 user_shm_unlock(inode->i_size, ucounts);
2391 info->flags &= ~VM_LOCKED;
2392 mapping_clear_unevictable(file->f_mapping);
2393 }
2394 retval = 0;
2395
2396out_nomem:
2397 return retval;
2398}
2399
2400static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2401{
2402 struct inode *inode = file_inode(file);
2403 struct shmem_inode_info *info = SHMEM_I(inode);
2404 int ret;
2405
2406 ret = seal_check_write(info->seals, vma);
2407 if (ret)
2408 return ret;
2409
2410 /* arm64 - allow memory tagging on RAM-based files */
2411 vm_flags_set(vma, VM_MTE_ALLOWED);
2412
2413 file_accessed(file);
2414 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2415 if (inode->i_nlink)
2416 vma->vm_ops = &shmem_vm_ops;
2417 else
2418 vma->vm_ops = &shmem_anon_vm_ops;
2419 return 0;
2420}
2421
2422static int shmem_file_open(struct inode *inode, struct file *file)
2423{
2424 file->f_mode |= FMODE_CAN_ODIRECT;
2425 return generic_file_open(inode, file);
2426}
2427
2428#ifdef CONFIG_TMPFS_XATTR
2429static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2430
2431/*
2432 * chattr's fsflags are unrelated to extended attributes,
2433 * but tmpfs has chosen to enable them under the same config option.
2434 */
2435static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2436{
2437 unsigned int i_flags = 0;
2438
2439 if (fsflags & FS_NOATIME_FL)
2440 i_flags |= S_NOATIME;
2441 if (fsflags & FS_APPEND_FL)
2442 i_flags |= S_APPEND;
2443 if (fsflags & FS_IMMUTABLE_FL)
2444 i_flags |= S_IMMUTABLE;
2445 /*
2446 * But FS_NODUMP_FL does not require any action in i_flags.
2447 */
2448 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2449}
2450#else
2451static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2452{
2453}
2454#define shmem_initxattrs NULL
2455#endif
2456
2457static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2458{
2459 return &SHMEM_I(inode)->dir_offsets;
2460}
2461
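/*
 * Allocate and initialise a new shmem/tmpfs inode: reserve an inode
 * number against the superblock limits, copy inheritable fsflags from
 * the parent directory, and set up the inode and file operations
 * according to the file type in @mode.
 */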
2462static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2463 struct super_block *sb,
2464 struct inode *dir, umode_t mode,
2465 dev_t dev, unsigned long flags)
2466{
2467 struct inode *inode;
2468 struct shmem_inode_info *info;
2469 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2470 ino_t ino;
2471 int err;
2472
2473 err = shmem_reserve_inode(sb, &ino);
2474 if (err)
2475 return ERR_PTR(err);
2476
2477 inode = new_inode(sb);
2478 if (!inode) {
2479 shmem_free_inode(sb, 0);
2480 return ERR_PTR(-ENOSPC);
2481 }
2482
2483 inode->i_ino = ino;
2484 inode_init_owner(idmap, inode, dir, mode);
2485 inode->i_blocks = 0;
2486 simple_inode_init_ts(inode);
2487 inode->i_generation = get_random_u32();
2488 info = SHMEM_I(inode);
2489 memset(info, 0, (char *)inode - (char *)info);
2490 spin_lock_init(&info->lock);
2491 atomic_set(&info->stop_eviction, 0);
2492 info->seals = F_SEAL_SEAL;
2493 info->flags = flags & VM_NORESERVE;
2494 info->i_crtime = inode_get_mtime(inode);
2495 info->fsflags = (dir == NULL) ? 0 :
2496 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2497 if (info->fsflags)
2498 shmem_set_inode_flags(inode, info->fsflags);
2499 INIT_LIST_HEAD(&info->shrinklist);
2500 INIT_LIST_HEAD(&info->swaplist);
2501 simple_xattrs_init(&info->xattrs);
2502 cache_no_acl(inode);
2503 if (sbinfo->noswap)
2504 mapping_set_unevictable(inode->i_mapping);
2505 mapping_set_large_folios(inode->i_mapping);
2506
2507 switch (mode & S_IFMT) {
2508 default:
2509 inode->i_op = &shmem_special_inode_operations;
2510 init_special_inode(inode, mode, dev);
2511 break;
2512 case S_IFREG:
2513 inode->i_mapping->a_ops = &shmem_aops;
2514 inode->i_op = &shmem_inode_operations;
2515 inode->i_fop = &shmem_file_operations;
2516 mpol_shared_policy_init(&info->policy,
2517 shmem_get_sbmpol(sbinfo));
2518 break;
2519 case S_IFDIR:
2520 inc_nlink(inode);
2521 /* Some things misbehave if size == 0 on a directory */
2522 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2523 inode->i_op = &shmem_dir_inode_operations;
2524 inode->i_fop = &simple_offset_dir_operations;
2525 simple_offset_init(shmem_get_offset_ctx(inode));
2526 break;
2527 case S_IFLNK:
2528 /*
2529 * Must not load anything in the rbtree,
2530 * mpol_free_shared_policy will not be called.
2531 */
2532 mpol_shared_policy_init(&info->policy, NULL);
2533 break;
2534 }
2535
2536 lockdep_annotate_inode_mutex_key(inode);
2537 return inode;
2538}
2539
2540#ifdef CONFIG_TMPFS_QUOTA
2541static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2542 struct super_block *sb, struct inode *dir,
2543 umode_t mode, dev_t dev, unsigned long flags)
2544{
2545 int err;
2546 struct inode *inode;
2547
2548 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2549 if (IS_ERR(inode))
2550 return inode;
2551
2552 err = dquot_initialize(inode);
2553 if (err)
2554 goto errout;
2555
2556 err = dquot_alloc_inode(inode);
2557 if (err) {
2558 dquot_drop(inode);
2559 goto errout;
2560 }
2561 return inode;
2562
2563errout:
2564 inode->i_flags |= S_NOQUOTA;
2565 iput(inode);
2566 return ERR_PTR(err);
2567}
2568#else
2569static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2570 struct super_block *sb, struct inode *dir,
2571 umode_t mode, dev_t dev, unsigned long flags)
2572{
2573 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2574}
2575#endif /* CONFIG_TMPFS_QUOTA */
2576
2577#ifdef CONFIG_USERFAULTFD
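/*
 * Fill one page of a shmem-backed VMA on behalf of userfaultfd
 * (UFFDIO_COPY or UFFDIO_ZEROPAGE): allocate a folio, copy or clear it,
 * add it to the page cache, and install the PTE.  Returns -ENOENT with
 * *foliop set when the copy must be retried outside mmap_lock.
 */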
2578int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2579 struct vm_area_struct *dst_vma,
2580 unsigned long dst_addr,
2581 unsigned long src_addr,
2582 uffd_flags_t flags,
2583 struct folio **foliop)
2584{
2585 struct inode *inode = file_inode(dst_vma->vm_file);
2586 struct shmem_inode_info *info = SHMEM_I(inode);
2587 struct address_space *mapping = inode->i_mapping;
2588 gfp_t gfp = mapping_gfp_mask(mapping);
2589 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2590 void *page_kaddr;
2591 struct folio *folio;
2592 int ret;
2593 pgoff_t max_off;
2594
2595 if (shmem_inode_acct_blocks(inode, 1)) {
2596 /*
2597 * We may have got a page, returned -ENOENT triggering a retry,
2598 * and now we find ourselves with -ENOMEM. Release the page, to
2599 * avoid a BUG_ON in our caller.
2600 */
2601 if (unlikely(*foliop)) {
2602 folio_put(*foliop);
2603 *foliop = NULL;
2604 }
2605 return -ENOMEM;
2606 }
2607
2608 if (!*foliop) {
2609 ret = -ENOMEM;
2610 folio = shmem_alloc_folio(gfp, info, pgoff);
2611 if (!folio)
2612 goto out_unacct_blocks;
2613
2614 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
2615 page_kaddr = kmap_local_folio(folio, 0);
2616 /*
2617 * The read mmap_lock is held here. Despite the
2618 * mmap_lock being read recursive a deadlock is still
2619 * possible if a writer has taken a lock. For example:
2620 *
2621 * process A thread 1 takes read lock on own mmap_lock
2622 * process A thread 2 calls mmap, blocks taking write lock
2623 * process B thread 1 takes page fault, read lock on own mmap lock
2624 * process B thread 2 calls mmap, blocks taking write lock
2625 * process A thread 1 blocks taking read lock on process B
2626 * process B thread 1 blocks taking read lock on process A
2627 *
2628 * Disable page faults to prevent potential deadlock
2629 * and retry the copy outside the mmap_lock.
2630 */
2631 pagefault_disable();
2632 ret = copy_from_user(page_kaddr,
2633 (const void __user *)src_addr,
2634 PAGE_SIZE);
2635 pagefault_enable();
2636 kunmap_local(page_kaddr);
2637
2638 /* fallback to copy_from_user outside mmap_lock */
2639 if (unlikely(ret)) {
2640 *foliop = folio;
2641 ret = -ENOENT;
2642 /* don't free the page */
2643 goto out_unacct_blocks;
2644 }
2645
2646 flush_dcache_folio(folio);
2647 } else { /* ZEROPAGE */
2648 clear_user_highpage(&folio->page, dst_addr);
2649 }
2650 } else {
2651 folio = *foliop;
2652 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2653 *foliop = NULL;
2654 }
2655
2656 VM_BUG_ON(folio_test_locked(folio));
2657 VM_BUG_ON(folio_test_swapbacked(folio));
2658 __folio_set_locked(folio);
2659 __folio_set_swapbacked(folio);
2660 __folio_mark_uptodate(folio);
2661
2662 ret = -EFAULT;
2663 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2664 if (unlikely(pgoff >= max_off))
2665 goto out_release;
2666
2667 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
2668 if (ret)
2669 goto out_release;
2670 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
2671 if (ret)
2672 goto out_release;
2673
2674 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2675 &folio->page, true, flags);
2676 if (ret)
2677 goto out_delete_from_cache;
2678
2679 shmem_recalc_inode(inode, 1, 0);
2680 folio_unlock(folio);
2681 return 0;
2682out_delete_from_cache:
2683 filemap_remove_folio(folio);
2684out_release:
2685 folio_unlock(folio);
2686 folio_put(folio);
2687out_unacct_blocks:
2688 shmem_inode_unacct_blocks(inode, 1);
2689 return ret;
2690}
2691#endif /* CONFIG_USERFAULTFD */
2692
2693#ifdef CONFIG_TMPFS
2694static const struct inode_operations shmem_symlink_inode_operations;
2695static const struct inode_operations shmem_short_symlink_operations;
2696
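/*
 * Write path ->write_begin: refuse writes forbidden by memfd seals, then
 * return a locked page for generic_perform_write() to copy into,
 * obtained via shmem_get_folio(SGP_WRITE).
 */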
2697static int
2698shmem_write_begin(struct file *file, struct address_space *mapping,
2699 loff_t pos, unsigned len,
2700 struct page **pagep, void **fsdata)
2701{
2702 struct inode *inode = mapping->host;
2703 struct shmem_inode_info *info = SHMEM_I(inode);
2704 pgoff_t index = pos >> PAGE_SHIFT;
2705 struct folio *folio;
2706 int ret = 0;
2707
2708 /* i_rwsem is held by caller */
2709 if (unlikely(info->seals & (F_SEAL_GROW |
2710 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2711 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2712 return -EPERM;
2713 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2714 return -EPERM;
2715 }
2716
2717 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2718 if (ret)
2719 return ret;
2720
2721 *pagep = folio_file_page(folio, index);
2722 if (PageHWPoison(*pagep)) {
2723 folio_unlock(folio);
2724 folio_put(folio);
2725 *pagep = NULL;
2726 return -EIO;
2727 }
2728
2729 return 0;
2730}
2731
2732static int
2733shmem_write_end(struct file *file, struct address_space *mapping,
2734 loff_t pos, unsigned len, unsigned copied,
2735 struct page *page, void *fsdata)
2736{
2737 struct folio *folio = page_folio(page);
2738 struct inode *inode = mapping->host;
2739
2740 if (pos + copied > inode->i_size)
2741 i_size_write(inode, pos + copied);
2742
2743 if (!folio_test_uptodate(folio)) {
2744 if (copied < folio_size(folio)) {
2745 size_t from = offset_in_folio(folio, pos);
2746 folio_zero_segments(folio, 0, from,
2747 from + copied, folio_size(folio));
2748 }
2749 folio_mark_uptodate(folio);
2750 }
2751 folio_mark_dirty(folio);
2752 folio_unlock(folio);
2753 folio_put(folio);
2754
2755 return copied;
2756}
2757
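/*
 * Read as much as requested into the iterator, page by page.  Holes are
 * not allocated: they are copied out as zeroes, using the zero page for
 * user-backed iterators and iov_iter_zero() for pipes and the like.
 */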
2758static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2759{
2760 struct file *file = iocb->ki_filp;
2761 struct inode *inode = file_inode(file);
2762 struct address_space *mapping = inode->i_mapping;
2763 pgoff_t index;
2764 unsigned long offset;
2765 int error = 0;
2766 ssize_t retval = 0;
2767 loff_t *ppos = &iocb->ki_pos;
2768
2769 index = *ppos >> PAGE_SHIFT;
2770 offset = *ppos & ~PAGE_MASK;
2771
2772 for (;;) {
2773 struct folio *folio = NULL;
2774 struct page *page = NULL;
2775 pgoff_t end_index;
2776 unsigned long nr, ret;
2777 loff_t i_size = i_size_read(inode);
2778
2779 end_index = i_size >> PAGE_SHIFT;
2780 if (index > end_index)
2781 break;
2782 if (index == end_index) {
2783 nr = i_size & ~PAGE_MASK;
2784 if (nr <= offset)
2785 break;
2786 }
2787
2788 error = shmem_get_folio(inode, index, &folio, SGP_READ);
2789 if (error) {
2790 if (error == -EINVAL)
2791 error = 0;
2792 break;
2793 }
2794 if (folio) {
2795 folio_unlock(folio);
2796
2797 page = folio_file_page(folio, index);
2798 if (PageHWPoison(page)) {
2799 folio_put(folio);
2800 error = -EIO;
2801 break;
2802 }
2803 }
2804
2805 /*
2806 * We must re-evaluate i_size after the lookup, since reads (unlike writes)
2807 * are called without i_rwsem protection against truncate
2808 */
2809 nr = PAGE_SIZE;
2810 i_size = i_size_read(inode);
2811 end_index = i_size >> PAGE_SHIFT;
2812 if (index == end_index) {
2813 nr = i_size & ~PAGE_MASK;
2814 if (nr <= offset) {
2815 if (folio)
2816 folio_put(folio);
2817 break;
2818 }
2819 }
2820 nr -= offset;
2821
2822 if (folio) {
2823 /*
2824 * If users can be writing to this page using arbitrary
2825 * virtual addresses, take care about potential aliasing
2826 * before reading the page on the kernel side.
2827 */
2828 if (mapping_writably_mapped(mapping))
2829 flush_dcache_page(page);
2830 /*
2831 * Mark the page accessed if we read the beginning.
2832 */
2833 if (!offset)
2834 folio_mark_accessed(folio);
2835 /*
2836 * Ok, we have the page, and it's up-to-date, so
2837 * now we can copy it to user space...
2838 */
2839 ret = copy_page_to_iter(page, offset, nr, to);
2840 folio_put(folio);
2841
2842 } else if (user_backed_iter(to)) {
2843 /*
2844 * Copy-to-user tends to be so well optimized, and
2845 * clear_user() so much less so, that it is noticeably
2846 * faster to copy the zero page instead of clearing.
2847 */
2848 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
2849 } else {
2850 /*
2851 * But submitting the same page twice in a row to
2852 * splice() - or others? - can result in confusion:
2853 * so don't attempt that optimization on pipes etc.
2854 */
2855 ret = iov_iter_zero(nr, to);
2856 }
2857
2858 retval += ret;
2859 offset += ret;
2860 index += offset >> PAGE_SHIFT;
2861 offset &= ~PAGE_MASK;
2862
2863 if (!iov_iter_count(to))
2864 break;
2865 if (ret < nr) {
2866 error = -EFAULT;
2867 break;
2868 }
2869 cond_resched();
2870 }
2871
2872 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2873 file_accessed(file);
2874 return retval ? retval : error;
2875}
2876
2877static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2878{
2879 struct file *file = iocb->ki_filp;
2880 struct inode *inode = file->f_mapping->host;
2881 ssize_t ret;
2882
2883 inode_lock(inode);
2884 ret = generic_write_checks(iocb, from);
2885 if (ret <= 0)
2886 goto unlock;
2887 ret = file_remove_privs(file);
2888 if (ret)
2889 goto unlock;
2890 ret = file_update_time(file);
2891 if (ret)
2892 goto unlock;
2893 ret = generic_perform_write(iocb, from);
2894unlock:
2895 inode_unlock(inode);
2896 return ret;
2897}
2898
2899static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2900 struct pipe_buffer *buf)
2901{
2902 return true;
2903}
2904
2905static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2906 struct pipe_buffer *buf)
2907{
2908}
2909
2910static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
2911 struct pipe_buffer *buf)
2912{
2913 return false;
2914}
2915
2916static const struct pipe_buf_operations zero_pipe_buf_ops = {
2917 .release = zero_pipe_buf_release,
2918 .try_steal = zero_pipe_buf_try_steal,
2919 .get = zero_pipe_buf_get,
2920};
2921
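/*
 * Splice (part of) the zero page into the pipe, to represent a hole in
 * the file without allocating any memory for it.
 */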
2922static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
2923 loff_t fpos, size_t size)
2924{
2925 size_t offset = fpos & ~PAGE_MASK;
2926
2927 size = min_t(size_t, size, PAGE_SIZE - offset);
2928
2929 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2930 struct pipe_buffer *buf = pipe_head_buf(pipe);
2931
2932 *buf = (struct pipe_buffer) {
2933 .ops = &zero_pipe_buf_ops,
2934 .page = ZERO_PAGE(0),
2935 .offset = offset,
2936 .len = size,
2937 };
2938 pipe->head++;
2939 }
2940
2941 return size;
2942}
2943
2944static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
2945 struct pipe_inode_info *pipe,
2946 size_t len, unsigned int flags)
2947{
2948 struct inode *inode = file_inode(in);
2949 struct address_space *mapping = inode->i_mapping;
2950 struct folio *folio = NULL;
2951 size_t total_spliced = 0, used, npages, n, part;
2952 loff_t isize;
2953 int error = 0;
2954
2955 /* Work out how much data we can actually add into the pipe */
2956 used = pipe_occupancy(pipe->head, pipe->tail);
2957 npages = max_t(ssize_t, pipe->max_usage - used, 0);
2958 len = min_t(size_t, len, npages * PAGE_SIZE);
2959
2960 do {
2961 if (*ppos >= i_size_read(inode))
2962 break;
2963
2964 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2965 SGP_READ);
2966 if (error) {
2967 if (error == -EINVAL)
2968 error = 0;
2969 break;
2970 }
2971 if (folio) {
2972 folio_unlock(folio);
2973
2974 if (folio_test_hwpoison(folio) ||
2975 (folio_test_large(folio) &&
2976 folio_test_has_hwpoisoned(folio))) {
2977 error = -EIO;
2978 break;
2979 }
2980 }
2981
2982 /*
2983 * i_size must be checked after we know the pages are Uptodate.
2984 *
2985 * Checking i_size after the folio lookup allows us to calculate
2986 * the correct value for "part", which means the zero-filled
2987 * part of the page is not copied back to userspace (unless
2988 * another truncate extends the file - this is desired though).
2989 */
2990 isize = i_size_read(inode);
2991 if (unlikely(*ppos >= isize))
2992 break;
2993 part = min_t(loff_t, isize - *ppos, len);
2994
2995 if (folio) {
2996 /*
2997 * If users can be writing to this page using arbitrary
2998 * virtual addresses, take care about potential aliasing
2999 * before reading the page on the kernel side.
3000 */
3001 if (mapping_writably_mapped(mapping))
3002 flush_dcache_folio(folio);
3003 folio_mark_accessed(folio);
3004 /*
3005 * Ok, we have the page, and it's up-to-date, so we can
3006 * now splice it into the pipe.
3007 */
3008 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3009 folio_put(folio);
3010 folio = NULL;
3011 } else {
3012 n = splice_zeropage_into_pipe(pipe, *ppos, part);
3013 }
3014
3015 if (!n)
3016 break;
3017 len -= n;
3018 total_spliced += n;
3019 *ppos += n;
3020 in->f_ra.prev_pos = *ppos;
3021 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3022 break;
3023
3024 cond_resched();
3025 } while (len);
3026
3027 if (folio)
3028 folio_put(folio);
3029
3030 file_accessed(in);
3031 return total_spliced ? total_spliced : error;
3032}
3033
3034static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3035{
3036 struct address_space *mapping = file->f_mapping;
3037 struct inode *inode = mapping->host;
3038
3039 if (whence != SEEK_DATA && whence != SEEK_HOLE)
3040 return generic_file_llseek_size(file, offset, whence,
3041 MAX_LFS_FILESIZE, i_size_read(inode));
3042 if (offset < 0)
3043 return -ENXIO;
3044
3045 inode_lock(inode);
3046 /* We're holding i_rwsem so we can access i_size directly */
3047 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3048 if (offset >= 0)
3049 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3050 inode_unlock(inode);
3051 return offset;
3052}
3053
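/*
 * fallocate() for tmpfs: FALLOC_FL_PUNCH_HOLE truncates the range while
 * keeping racing faults out of the hole (see shmem_falloc_wait()), and
 * plain preallocation instantiates folios up front, undoing the
 * not-yet-uptodate ones if it fails part way through.
 */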
3054static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3055 loff_t len)
3056{
3057 struct inode *inode = file_inode(file);
3058 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3059 struct shmem_inode_info *info = SHMEM_I(inode);
3060 struct shmem_falloc shmem_falloc;
3061 pgoff_t start, index, end, undo_fallocend;
3062 int error;
3063
3064 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3065 return -EOPNOTSUPP;
3066
3067 inode_lock(inode);
3068
3069 if (mode & FALLOC_FL_PUNCH_HOLE) {
3070 struct address_space *mapping = file->f_mapping;
3071 loff_t unmap_start = round_up(offset, PAGE_SIZE);
3072 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3073 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3074
3075 /* protected by i_rwsem */
3076 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3077 error = -EPERM;
3078 goto out;
3079 }
3080
3081 shmem_falloc.waitq = &shmem_falloc_waitq;
3082 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3083 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3084 spin_lock(&inode->i_lock);
3085 inode->i_private = &shmem_falloc;
3086 spin_unlock(&inode->i_lock);
3087
3088 if ((u64)unmap_end > (u64)unmap_start)
3089 unmap_mapping_range(mapping, unmap_start,
3090 1 + unmap_end - unmap_start, 0);
3091 shmem_truncate_range(inode, offset, offset + len - 1);
3092 /* No need to unmap again: hole-punching leaves COWed pages */
3093
3094 spin_lock(&inode->i_lock);
3095 inode->i_private = NULL;
3096 wake_up_all(&shmem_falloc_waitq);
3097 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3098 spin_unlock(&inode->i_lock);
3099 error = 0;
3100 goto out;
3101 }
3102
3103 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3104 error = inode_newsize_ok(inode, offset + len);
3105 if (error)
3106 goto out;
3107
3108 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3109 error = -EPERM;
3110 goto out;
3111 }
3112
3113 start = offset >> PAGE_SHIFT;
3114 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3115 /* Try to avoid a swapstorm if len is impossible to satisfy */
3116 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3117 error = -ENOSPC;
3118 goto out;
3119 }
3120
3121 shmem_falloc.waitq = NULL;
3122 shmem_falloc.start = start;
3123 shmem_falloc.next = start;
3124 shmem_falloc.nr_falloced = 0;
3125 shmem_falloc.nr_unswapped = 0;
3126 spin_lock(&inode->i_lock);
3127 inode->i_private = &shmem_falloc;
3128 spin_unlock(&inode->i_lock);
3129
3130 /*
3131 * info->fallocend is only relevant when huge pages might be
3132 * involved: to prevent split_huge_page() freeing fallocated
3133 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3134 */
3135 undo_fallocend = info->fallocend;
3136 if (info->fallocend < end)
3137 info->fallocend = end;
3138
3139 for (index = start; index < end; ) {
3140 struct folio *folio;
3141
3142 /*
3143 * Good, the fallocate(2) manpage permits EINTR: we may have
3144 * been interrupted because we are using up too much memory.
3145 */
3146 if (signal_pending(current))
3147 error = -EINTR;
3148 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3149 error = -ENOMEM;
3150 else
3151 error = shmem_get_folio(inode, index, &folio,
3152 SGP_FALLOC);
3153 if (error) {
3154 info->fallocend = undo_fallocend;
3155 /* Remove the !uptodate folios we added */
3156 if (index > start) {
3157 shmem_undo_range(inode,
3158 (loff_t)start << PAGE_SHIFT,
3159 ((loff_t)index << PAGE_SHIFT) - 1, true);
3160 }
3161 goto undone;
3162 }
3163
3164 /*
3165 * Here is a more important optimization than it appears:
3166 * a second SGP_FALLOC on the same large folio will clear it,
3167 * making it uptodate and un-undoable if we fail later.
3168 */
3169 index = folio_next_index(folio);
3170 /* Beware 32-bit wraparound */
3171 if (!index)
3172 index--;
3173
3174 /*
3175 * Inform shmem_writepage() how far we have reached.
3176 * No need for lock or barrier: we have the page lock.
3177 */
3178 if (!folio_test_uptodate(folio))
3179 shmem_falloc.nr_falloced += index - shmem_falloc.next;
3180 shmem_falloc.next = index;
3181
3182 /*
3183 * If !uptodate, leave it that way so that freeable folios
3184 * can be recognized if we need to rollback on error later.
3185 * But mark it dirty so that memory pressure will swap rather
3186 * than free the folios we are allocating (and SGP_CACHE folios
3187 * might still be clean: we now need to mark those dirty too).
3188 */
3189 folio_mark_dirty(folio);
3190 folio_unlock(folio);
3191 folio_put(folio);
3192 cond_resched();
3193 }
3194
3195 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3196 i_size_write(inode, offset + len);
3197undone:
3198 spin_lock(&inode->i_lock);
3199 inode->i_private = NULL;
3200 spin_unlock(&inode->i_lock);
3201out:
3202 if (!error)
3203 file_modified(file);
3204 inode_unlock(inode);
3205 return error;
3206}
3207
3208static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3209{
3210 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3211
3212 buf->f_type = TMPFS_MAGIC;
3213 buf->f_bsize = PAGE_SIZE;
3214 buf->f_namelen = NAME_MAX;
3215 if (sbinfo->max_blocks) {
3216 buf->f_blocks = sbinfo->max_blocks;
3217 buf->f_bavail =
3218 buf->f_bfree = sbinfo->max_blocks -
3219 percpu_counter_sum(&sbinfo->used_blocks);
3220 }
3221 if (sbinfo->max_inodes) {
3222 buf->f_files = sbinfo->max_inodes;
3223 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3224 }
3225 /* else leave those fields 0 like simple_statfs */
3226
3227 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3228
3229 return 0;
3230}
3231
3232/*
3233 * File creation. Allocate an inode, and we're done..
3234 */
3235static int
3236shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3237 struct dentry *dentry, umode_t mode, dev_t dev)
3238{
3239 struct inode *inode;
3240 int error;
3241
3242 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3243 if (IS_ERR(inode))
3244 return PTR_ERR(inode);
3245
3246 error = simple_acl_create(dir, inode);
3247 if (error)
3248 goto out_iput;
3249 error = security_inode_init_security(inode, dir, &dentry->d_name,
3250 shmem_initxattrs, NULL);
3251 if (error && error != -EOPNOTSUPP)
3252 goto out_iput;
3253
3254 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3255 if (error)
3256 goto out_iput;
3257
3258 dir->i_size += BOGO_DIRENT_SIZE;
3259 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3260 inode_inc_iversion(dir);
3261 d_instantiate(dentry, inode);
3262 dget(dentry); /* Extra count - pin the dentry in core */
3263 return error;
3264
3265out_iput:
3266 iput(inode);
3267 return error;
3268}
3269
3270static int
3271shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3272 struct file *file, umode_t mode)
3273{
3274 struct inode *inode;
3275 int error;
3276
3277 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3278 if (IS_ERR(inode)) {
3279 error = PTR_ERR(inode);
3280 goto err_out;
3281 }
3282 error = security_inode_init_security(inode, dir, NULL,
3283 shmem_initxattrs, NULL);
3284 if (error && error != -EOPNOTSUPP)
3285 goto out_iput;
3286 error = simple_acl_create(dir, inode);
3287 if (error)
3288 goto out_iput;
3289 d_tmpfile(file, inode);
3290
3291err_out:
3292 return finish_open_simple(file, error);
3293out_iput:
3294 iput(inode);
3295 return error;
3296}
3297
3298static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3299 struct dentry *dentry, umode_t mode)
3300{
3301 int error;
3302
3303 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3304 if (error)
3305 return error;
3306 inc_nlink(dir);
3307 return 0;
3308}
3309
3310static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3311 struct dentry *dentry, umode_t mode, bool excl)
3312{
3313 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3314}
3315
3316/*
3317 * Link a file..
3318 */
3319static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3320 struct dentry *dentry)
3321{
3322 struct inode *inode = d_inode(old_dentry);
3323 int ret = 0;
3324
3325 /*
3326 * No ordinary (disk based) filesystem counts links as inodes;
3327 * but each new link needs a new dentry, pinning lowmem, and
3328 * tmpfs dentries cannot be pruned until they are unlinked.
3329 * But if an O_TMPFILE file is linked into the tmpfs, the
3330 * first link must skip that, to get the accounting right.
3331 */
3332 if (inode->i_nlink) {
3333 ret = shmem_reserve_inode(inode->i_sb, NULL);
3334 if (ret)
3335 goto out;
3336 }
3337
3338 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3339 if (ret) {
3340 if (inode->i_nlink)
3341 shmem_free_inode(inode->i_sb, 0);
3342 goto out;
3343 }
3344
3345 dir->i_size += BOGO_DIRENT_SIZE;
3346 inode_set_mtime_to_ts(dir,
3347 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3348 inode_inc_iversion(dir);
3349 inc_nlink(inode);
3350 ihold(inode); /* New dentry reference */
3351 dget(dentry); /* Extra pinning count for the created dentry */
3352 d_instantiate(dentry, inode);
3353out:
3354 return ret;
3355}
3356
3357static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3358{
3359 struct inode *inode = d_inode(dentry);
3360
3361 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3362 shmem_free_inode(inode->i_sb, 0);
3363
3364 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3365
3366 dir->i_size -= BOGO_DIRENT_SIZE;
3367 inode_set_mtime_to_ts(dir,
3368 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3369 inode_inc_iversion(dir);
3370 drop_nlink(inode);
3371 dput(dentry); /* Undo the count from "create" - does all the work */
3372 return 0;
3373}
3374
3375static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3376{
3377 if (!simple_empty(dentry))
3378 return -ENOTEMPTY;
3379
3380 drop_nlink(d_inode(dentry));
3381 drop_nlink(dir);
3382 return shmem_unlink(dir, dentry);
3383}
3384
3385static int shmem_whiteout(struct mnt_idmap *idmap,
3386 struct inode *old_dir, struct dentry *old_dentry)
3387{
3388 struct dentry *whiteout;
3389 int error;
3390
3391 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3392 if (!whiteout)
3393 return -ENOMEM;
3394
3395 error = shmem_mknod(idmap, old_dir, whiteout,
3396 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3397 dput(whiteout);
3398 if (error)
3399 return error;
3400
3401 /*
3402 * Cheat and hash the whiteout while the old dentry is still in
3403 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3404 *
3405 * d_lookup() will consistently find one of them at this point,
3406 * not sure which one, but that isn't even important.
3407 */
3408 d_rehash(whiteout);
3409 return 0;
3410}
3411
3412/*
3413 * The VFS layer already does all the dentry stuff for rename,
3414 * we just have to decrement the usage count for the target if
3415 * it exists so that the VFS layer correctly frees it when it
3416 * gets overwritten.
3417 */
3418static int shmem_rename2(struct mnt_idmap *idmap,
3419 struct inode *old_dir, struct dentry *old_dentry,
3420 struct inode *new_dir, struct dentry *new_dentry,
3421 unsigned int flags)
3422{
3423 struct inode *inode = d_inode(old_dentry);
3424 int they_are_dirs = S_ISDIR(inode->i_mode);
3425 int error;
3426
3427 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3428 return -EINVAL;
3429
3430 if (flags & RENAME_EXCHANGE)
3431 return simple_offset_rename_exchange(old_dir, old_dentry,
3432 new_dir, new_dentry);
3433
3434 if (!simple_empty(new_dentry))
3435 return -ENOTEMPTY;
3436
3437 if (flags & RENAME_WHITEOUT) {
3438 error = shmem_whiteout(idmap, old_dir, old_dentry);
3439 if (error)
3440 return error;
3441 }
3442
3443 simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
3444 error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
3445 if (error)
3446 return error;
3447
3448 if (d_really_is_positive(new_dentry)) {
3449 (void) shmem_unlink(new_dir, new_dentry);
3450 if (they_are_dirs) {
3451 drop_nlink(d_inode(new_dentry));
3452 drop_nlink(old_dir);
3453 }
3454 } else if (they_are_dirs) {
3455 drop_nlink(old_dir);
3456 inc_nlink(new_dir);
3457 }
3458
3459 old_dir->i_size -= BOGO_DIRENT_SIZE;
3460 new_dir->i_size += BOGO_DIRENT_SIZE;
3461 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
3462 inode_inc_iversion(old_dir);
3463 inode_inc_iversion(new_dir);
3464 return 0;
3465}
3466
3467static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3468 struct dentry *dentry, const char *symname)
3469{
3470 int error;
3471 int len;
3472 struct inode *inode;
3473 struct folio *folio;
3474
3475 len = strlen(symname) + 1;
3476 if (len > PAGE_SIZE)
3477 return -ENAMETOOLONG;
3478
3479 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3480 VM_NORESERVE);
3481 if (IS_ERR(inode))
3482 return PTR_ERR(inode);
3483
3484 error = security_inode_init_security(inode, dir, &dentry->d_name,
3485 shmem_initxattrs, NULL);
3486 if (error && error != -EOPNOTSUPP)
3487 goto out_iput;
3488
3489 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3490 if (error)
3491 goto out_iput;
3492
3493 inode->i_size = len-1;
3494 if (len <= SHORT_SYMLINK_LEN) {
3495 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3496 if (!inode->i_link) {
3497 error = -ENOMEM;
3498 goto out_remove_offset;
3499 }
3500 inode->i_op = &shmem_short_symlink_operations;
3501 } else {
3502 inode_nohighmem(inode);
3503 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3504 if (error)
3505 goto out_remove_offset;
3506 inode->i_mapping->a_ops = &shmem_aops;
3507 inode->i_op = &shmem_symlink_inode_operations;
3508 memcpy(folio_address(folio), symname, len);
3509 folio_mark_uptodate(folio);
3510 folio_mark_dirty(folio);
3511 folio_unlock(folio);
3512 folio_put(folio);
3513 }
3514 dir->i_size += BOGO_DIRENT_SIZE;
3515 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3516 inode_inc_iversion(dir);
3517 d_instantiate(dentry, inode);
3518 dget(dentry);
3519 return 0;
3520
3521out_remove_offset:
3522 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3523out_iput:
3524 iput(inode);
3525 return error;
3526}
3527
3528static void shmem_put_link(void *arg)
3529{
3530 folio_mark_accessed(arg);
3531 folio_put(arg);
3532}
3533
3534static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3535 struct delayed_call *done)
3536{
3537 struct folio *folio = NULL;
3538 int error;
3539
3540 if (!dentry) {
3541 folio = filemap_get_folio(inode->i_mapping, 0);
3542 if (IS_ERR(folio))
3543 return ERR_PTR(-ECHILD);
3544 if (PageHWPoison(folio_page(folio, 0)) ||
3545 !folio_test_uptodate(folio)) {
3546 folio_put(folio);
3547 return ERR_PTR(-ECHILD);
3548 }
3549 } else {
3550 error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3551 if (error)
3552 return ERR_PTR(error);
3553 if (!folio)
3554 return ERR_PTR(-ECHILD);
3555 if (PageHWPoison(folio_page(folio, 0))) {
3556 folio_unlock(folio);
3557 folio_put(folio);
3558 return ERR_PTR(-ECHILD);
3559 }
3560 folio_unlock(folio);
3561 }
3562 set_delayed_call(done, shmem_put_link, folio);
3563 return folio_address(folio);
3564}
3565
3566#ifdef CONFIG_TMPFS_XATTR
3567
3568static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3569{
3570 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3571
3572 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3573
3574 return 0;
3575}
3576
3577static int shmem_fileattr_set(struct mnt_idmap *idmap,
3578 struct dentry *dentry, struct fileattr *fa)
3579{
3580 struct inode *inode = d_inode(dentry);
3581 struct shmem_inode_info *info = SHMEM_I(inode);
3582
3583 if (fileattr_has_fsx(fa))
3584 return -EOPNOTSUPP;
3585 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3586 return -EOPNOTSUPP;
3587
3588 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3589 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
3590
3591 shmem_set_inode_flags(inode, info->fsflags);
3592 inode_set_ctime_current(inode);
3593 inode_inc_iversion(inode);
3594 return 0;
3595}
3596
3597/*
3598 * Superblocks without xattr inode operations may get some security.* xattr
3599 * support from the LSM "for free". As soon as we have any other xattrs
3600 * like ACLs, we also need to implement the security.* handlers at
3601 * filesystem level, though.
3602 */
3603
3604/*
3605 * Callback for security_inode_init_security() for acquiring xattrs.
3606 */
3607static int shmem_initxattrs(struct inode *inode,
3608 const struct xattr *xattr_array, void *fs_info)
3609{
3610 struct shmem_inode_info *info = SHMEM_I(inode);
3611 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3612 const struct xattr *xattr;
3613 struct simple_xattr *new_xattr;
3614 size_t ispace = 0;
3615 size_t len;
3616
3617 if (sbinfo->max_inodes) {
3618 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3619 ispace += simple_xattr_space(xattr->name,
3620 xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
3621 }
3622 if (ispace) {
3623 raw_spin_lock(&sbinfo->stat_lock);
3624 if (sbinfo->free_ispace < ispace)
3625 ispace = 0;
3626 else
3627 sbinfo->free_ispace -= ispace;
3628 raw_spin_unlock(&sbinfo->stat_lock);
3629 if (!ispace)
3630 return -ENOSPC;
3631 }
3632 }
3633
3634 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3635 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3636 if (!new_xattr)
3637 break;
3638
3639 len = strlen(xattr->name) + 1;
3640 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3641 GFP_KERNEL_ACCOUNT);
3642 if (!new_xattr->name) {
3643 kvfree(new_xattr);
3644 break;
3645 }
3646
3647 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3648 XATTR_SECURITY_PREFIX_LEN);
3649 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3650 xattr->name, len);
3651
3652 simple_xattr_add(&info->xattrs, new_xattr);
3653 }
3654
3655 if (xattr->name != NULL) {
3656 if (ispace) {
3657 raw_spin_lock(&sbinfo->stat_lock);
3658 sbinfo->free_ispace += ispace;
3659 raw_spin_unlock(&sbinfo->stat_lock);
3660 }
3661 simple_xattrs_free(&info->xattrs, NULL);
3662 return -ENOMEM;
3663 }
3664
3665 return 0;
3666}
3667
3668static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3669 struct dentry *unused, struct inode *inode,
3670 const char *name, void *buffer, size_t size)
3671{
3672 struct shmem_inode_info *info = SHMEM_I(inode);
3673
3674 name = xattr_full_name(handler, name);
3675 return simple_xattr_get(&info->xattrs, name, buffer, size);
3676}
3677
3678static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3679 struct mnt_idmap *idmap,
3680 struct dentry *unused, struct inode *inode,
3681 const char *name, const void *value,
3682 size_t size, int flags)
3683{
3684 struct shmem_inode_info *info = SHMEM_I(inode);
3685 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3686 struct simple_xattr *old_xattr;
3687 size_t ispace = 0;
3688
3689 name = xattr_full_name(handler, name);
3690 if (value && sbinfo->max_inodes) {
3691 ispace = simple_xattr_space(name, size);
3692 raw_spin_lock(&sbinfo->stat_lock);
3693 if (sbinfo->free_ispace < ispace)
3694 ispace = 0;
3695 else
3696 sbinfo->free_ispace -= ispace;
3697 raw_spin_unlock(&sbinfo->stat_lock);
3698 if (!ispace)
3699 return -ENOSPC;
3700 }
3701
3702 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
3703 if (!IS_ERR(old_xattr)) {
3704 ispace = 0;
3705 if (old_xattr && sbinfo->max_inodes)
3706 ispace = simple_xattr_space(old_xattr->name,
3707 old_xattr->size);
3708 simple_xattr_free(old_xattr);
3709 old_xattr = NULL;
3710 inode_set_ctime_current(inode);
3711 inode_inc_iversion(inode);
3712 }
3713 if (ispace) {
3714 raw_spin_lock(&sbinfo->stat_lock);
3715 sbinfo->free_ispace += ispace;
3716 raw_spin_unlock(&sbinfo->stat_lock);
3717 }
3718 return PTR_ERR(old_xattr);
3719}
3720
3721static const struct xattr_handler shmem_security_xattr_handler = {
3722 .prefix = XATTR_SECURITY_PREFIX,
3723 .get = shmem_xattr_handler_get,
3724 .set = shmem_xattr_handler_set,
3725};
3726
3727static const struct xattr_handler shmem_trusted_xattr_handler = {
3728 .prefix = XATTR_TRUSTED_PREFIX,
3729 .get = shmem_xattr_handler_get,
3730 .set = shmem_xattr_handler_set,
3731};
3732
3733static const struct xattr_handler shmem_user_xattr_handler = {
3734 .prefix = XATTR_USER_PREFIX,
3735 .get = shmem_xattr_handler_get,
3736 .set = shmem_xattr_handler_set,
3737};
3738
3739static const struct xattr_handler * const shmem_xattr_handlers[] = {
3740 &shmem_security_xattr_handler,
3741 &shmem_trusted_xattr_handler,
3742 &shmem_user_xattr_handler,
3743 NULL
3744};
3745
3746static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3747{
3748 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3749 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3750}
3751#endif /* CONFIG_TMPFS_XATTR */
3752
3753static const struct inode_operations shmem_short_symlink_operations = {
3754 .getattr = shmem_getattr,
3755 .setattr = shmem_setattr,
3756 .get_link = simple_get_link,
3757#ifdef CONFIG_TMPFS_XATTR
3758 .listxattr = shmem_listxattr,
3759#endif
3760};
3761
3762static const struct inode_operations shmem_symlink_inode_operations = {
3763 .getattr = shmem_getattr,
3764 .setattr = shmem_setattr,
3765 .get_link = shmem_get_link,
3766#ifdef CONFIG_TMPFS_XATTR
3767 .listxattr = shmem_listxattr,
3768#endif
3769};
3770
3771static struct dentry *shmem_get_parent(struct dentry *child)
3772{
3773 return ERR_PTR(-ESTALE);
3774}
3775
3776static int shmem_match(struct inode *ino, void *vfh)
3777{
3778 __u32 *fh = vfh;
3779 __u64 inum = fh[2];
3780 inum = (inum << 32) | fh[1];
3781 return ino->i_ino == inum && fh[0] == ino->i_generation;
3782}
3783
3784/* Find any alias of inode, but prefer a hashed alias */
3785static struct dentry *shmem_find_alias(struct inode *inode)
3786{
3787 struct dentry *alias = d_find_alias(inode);
3788
3789 return alias ?: d_find_any_alias(inode);
3790}
3791
3792static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3793 struct fid *fid, int fh_len, int fh_type)
3794{
3795 struct inode *inode;
3796 struct dentry *dentry = NULL;
3797 u64 inum;
3798
3799 if (fh_len < 3)
3800 return NULL;
3801
3802 inum = fid->raw[2];
3803 inum = (inum << 32) | fid->raw[1];
3804
3805 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3806 shmem_match, fid->raw);
3807 if (inode) {
3808 dentry = shmem_find_alias(inode);
3809 iput(inode);
3810 }
3811
3812 return dentry;
3813}
3814
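/*
 * Encode a 3-word NFS file handle: i_generation plus the low and high
 * 32 bits of i_ino.  The inode is hashed here on first export, since
 * tmpfs does not hash its inodes at creation time.
 */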
3815static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3816 struct inode *parent)
3817{
3818 if (*len < 3) {
3819 *len = 3;
3820 return FILEID_INVALID;
3821 }
3822
3823 if (inode_unhashed(inode)) {
3824 /* Unfortunately insert_inode_hash is not idempotent,
3825 * so as we hash inodes here rather than at creation
3826 * time, we need a lock to ensure we only try
3827 * to do it once
3828 */
3829 static DEFINE_SPINLOCK(lock);
3830 spin_lock(&lock);
3831 if (inode_unhashed(inode))
3832 __insert_inode_hash(inode,
3833 inode->i_ino + inode->i_generation);
3834 spin_unlock(&lock);
3835 }
3836
3837 fh[0] = inode->i_generation;
3838 fh[1] = inode->i_ino;
3839 fh[2] = ((__u64)inode->i_ino) >> 32;
3840
3841 *len = 3;
3842 return 1;
3843}
3844
3845static const struct export_operations shmem_export_ops = {
3846 .get_parent = shmem_get_parent,
3847 .encode_fh = shmem_encode_fh,
3848 .fh_to_dentry = shmem_fh_to_dentry,
3849};
3850
3851enum shmem_param {
3852 Opt_gid,
3853 Opt_huge,
3854 Opt_mode,
3855 Opt_mpol,
3856 Opt_nr_blocks,
3857 Opt_nr_inodes,
3858 Opt_size,
3859 Opt_uid,
3860 Opt_inode32,
3861 Opt_inode64,
3862 Opt_noswap,
3863 Opt_quota,
3864 Opt_usrquota,
3865 Opt_grpquota,
3866 Opt_usrquota_block_hardlimit,
3867 Opt_usrquota_inode_hardlimit,
3868 Opt_grpquota_block_hardlimit,
3869 Opt_grpquota_inode_hardlimit,
3870};
3871
3872static const struct constant_table shmem_param_enums_huge[] = {
3873 {"never", SHMEM_HUGE_NEVER },
3874 {"always", SHMEM_HUGE_ALWAYS },
3875 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3876 {"advise", SHMEM_HUGE_ADVISE },
3877 {}
3878};
3879
3880const struct fs_parameter_spec shmem_fs_parameters[] = {
3881 fsparam_u32 ("gid", Opt_gid),
3882 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3883 fsparam_u32oct("mode", Opt_mode),
3884 fsparam_string("mpol", Opt_mpol),
3885 fsparam_string("nr_blocks", Opt_nr_blocks),
3886 fsparam_string("nr_inodes", Opt_nr_inodes),
3887 fsparam_string("size", Opt_size),
3888 fsparam_u32 ("uid", Opt_uid),
3889 fsparam_flag ("inode32", Opt_inode32),
3890 fsparam_flag ("inode64", Opt_inode64),
3891 fsparam_flag ("noswap", Opt_noswap),
3892#ifdef CONFIG_TMPFS_QUOTA
3893 fsparam_flag ("quota", Opt_quota),
3894 fsparam_flag ("usrquota", Opt_usrquota),
3895 fsparam_flag ("grpquota", Opt_grpquota),
3896 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
3897 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
3898 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
3899 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
3900#endif
3901 {}
3902};
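/*
 * Illustrative example (not part of the original source): the table above
 * corresponds to mount options such as
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,huge=within_size tmpfs /mnt
 *
 * where "size" accepts a k/m/g suffix or a percentage of total RAM, and
 * "huge" must be one of the strings listed in shmem_param_enums_huge.
 */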
3903
3904static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3905{
3906 struct shmem_options *ctx = fc->fs_private;
3907 struct fs_parse_result result;
3908 unsigned long long size;
3909 char *rest;
3910 int opt;
3911 kuid_t kuid;
3912 kgid_t kgid;
3913
3914 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3915 if (opt < 0)
3916 return opt;
3917
3918 switch (opt) {
3919 case Opt_size:
3920 size = memparse(param->string, &rest);
3921 if (*rest == '%') {
3922 size <<= PAGE_SHIFT;
3923 size *= totalram_pages();
3924 do_div(size, 100);
3925 rest++;
3926 }
3927 if (*rest)
3928 goto bad_value;
3929 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3930 ctx->seen |= SHMEM_SEEN_BLOCKS;
3931 break;
3932 case Opt_nr_blocks:
3933 ctx->blocks = memparse(param->string, &rest);
3934 if (*rest || ctx->blocks > LONG_MAX)
3935 goto bad_value;
3936 ctx->seen |= SHMEM_SEEN_BLOCKS;
3937 break;
3938 case Opt_nr_inodes:
3939 ctx->inodes = memparse(param->string, &rest);
3940 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
3941 goto bad_value;
3942 ctx->seen |= SHMEM_SEEN_INODES;
3943 break;
3944 case Opt_mode:
3945 ctx->mode = result.uint_32 & 07777;
3946 break;
3947 case Opt_uid:
3948 kuid = make_kuid(current_user_ns(), result.uint_32);
3949 if (!uid_valid(kuid))
3950 goto bad_value;
3951
3952 /*
3953 * The requested uid must be representable in the
3954 * filesystem's idmapping.
3955 */
3956 if (!kuid_has_mapping(fc->user_ns, kuid))
3957 goto bad_value;
3958
3959 ctx->uid = kuid;
3960 break;
3961 case Opt_gid:
3962 kgid = make_kgid(current_user_ns(), result.uint_32);
3963 if (!gid_valid(kgid))
3964 goto bad_value;
3965
3966 /*
3967 * The requested gid must be representable in the
3968 * filesystem's idmapping.
3969 */
3970 if (!kgid_has_mapping(fc->user_ns, kgid))
3971 goto bad_value;
3972
3973 ctx->gid = kgid;
3974 break;
3975 case Opt_huge:
3976 ctx->huge = result.uint_32;
3977 if (ctx->huge != SHMEM_HUGE_NEVER &&
3978 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3979 has_transparent_hugepage()))
3980 goto unsupported_parameter;
3981 ctx->seen |= SHMEM_SEEN_HUGE;
3982 break;
3983 case Opt_mpol:
3984 if (IS_ENABLED(CONFIG_NUMA)) {
3985 mpol_put(ctx->mpol);
3986 ctx->mpol = NULL;
3987 if (mpol_parse_str(param->string, &ctx->mpol))
3988 goto bad_value;
3989 break;
3990 }
3991 goto unsupported_parameter;
3992 case Opt_inode32:
3993 ctx->full_inums = false;
3994 ctx->seen |= SHMEM_SEEN_INUMS;
3995 break;
3996 case Opt_inode64:
3997 if (sizeof(ino_t) < 8) {
3998 return invalfc(fc,
3999 "Cannot use inode64 with <64bit inums in kernel\n");
4000 }
4001 ctx->full_inums = true;
4002 ctx->seen |= SHMEM_SEEN_INUMS;
4003 break;
4004 case Opt_noswap:
4005 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4006 return invalfc(fc,
4007 "Turning off swap in unprivileged tmpfs mounts unsupported");
4008 }
4009 ctx->noswap = true;
4010 ctx->seen |= SHMEM_SEEN_NOSWAP;
4011 break;
4012 case Opt_quota:
4013 if (fc->user_ns != &init_user_ns)
4014 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4015 ctx->seen |= SHMEM_SEEN_QUOTA;
4016 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4017 break;
4018 case Opt_usrquota:
4019 if (fc->user_ns != &init_user_ns)
4020 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4021 ctx->seen |= SHMEM_SEEN_QUOTA;
4022 ctx->quota_types |= QTYPE_MASK_USR;
4023 break;
4024 case Opt_grpquota:
4025 if (fc->user_ns != &init_user_ns)
4026 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4027 ctx->seen |= SHMEM_SEEN_QUOTA;
4028 ctx->quota_types |= QTYPE_MASK_GRP;
4029 break;
4030 case Opt_usrquota_block_hardlimit:
4031 size = memparse(param->string, &rest);
4032 if (*rest || !size)
4033 goto bad_value;
4034 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4035 return invalfc(fc,
4036 "User quota block hardlimit too large.");
4037 ctx->qlimits.usrquota_bhardlimit = size;
4038 break;
4039 case Opt_grpquota_block_hardlimit:
4040 size = memparse(param->string, &rest);
4041 if (*rest || !size)
4042 goto bad_value;
4043 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4044 return invalfc(fc,
4045 "Group quota block hardlimit too large.");
4046 ctx->qlimits.grpquota_bhardlimit = size;
4047 break;
4048 case Opt_usrquota_inode_hardlimit:
4049 size = memparse(param->string, &rest);
4050 if (*rest || !size)
4051 goto bad_value;
4052 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4053 return invalfc(fc,
4054 "User quota inode hardlimit too large.");
4055 ctx->qlimits.usrquota_ihardlimit = size;
4056 break;
4057 case Opt_grpquota_inode_hardlimit:
4058 size = memparse(param->string, &rest);
4059 if (*rest || !size)
4060 goto bad_value;
4061 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4062 return invalfc(fc,
4063 "Group quota inode hardlimit too large.");
4064 ctx->qlimits.grpquota_ihardlimit = size;
4065 break;
4066 }
4067 return 0;
4068
4069unsupported_parameter:
4070 return invalfc(fc, "Unsupported parameter '%s'", param->key);
4071bad_value:
4072 return invalfc(fc, "Bad value for '%s'", param->key);
4073}
4074
4075static int shmem_parse_options(struct fs_context *fc, void *data)
4076{
4077 char *options = data;
4078
4079 if (options) {
4080 int err = security_sb_eat_lsm_opts(options, &fc->security);
4081 if (err)
4082 return err;
4083 }
4084
4085 while (options != NULL) {
4086 char *this_char = options;
4087 for (;;) {
4088 /*
4089 * NUL-terminate this option: unfortunately,
4090 * mount options form a comma-separated list,
4091 * but mpol's nodelist may also contain commas.
4092 */
4093 options = strchr(options, ',');
4094 if (options == NULL)
4095 break;
4096 options++;
4097 if (!isdigit(*options)) {
4098 options[-1] = '\0';
4099 break;
4100 }
4101 }
4102 if (*this_char) {
4103 char *value = strchr(this_char, '=');
4104 size_t len = 0;
4105 int err;
4106
4107 if (value) {
4108 *value++ = '\0';
4109 len = strlen(value);
4110 }
4111 err = vfs_parse_fs_string(fc, this_char, value, len);
4112 if (err < 0)
4113 return err;
4114 }
4115 }
4116 return 0;
4117}
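/*
 * For example (illustrative): given "mpol=bind:0,2,size=1G", the scan above
 * does not split at the comma before '2' because the next character is a
 * digit, so the whole nodelist "0,2" stays attached to mpol and only the
 * comma before "size" terminates that option.
 */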
4118
4119/*
4120 * Reconfigure a shmem filesystem.
4121 */
4122static int shmem_reconfigure(struct fs_context *fc)
4123{
4124 struct shmem_options *ctx = fc->fs_private;
4125 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4126 unsigned long used_isp;
4127 struct mempolicy *mpol = NULL;
4128 const char *err;
4129
4130 raw_spin_lock(&sbinfo->stat_lock);
4131 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4132
4133 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4134 if (!sbinfo->max_blocks) {
4135 err = "Cannot retroactively limit size";
4136 goto out;
4137 }
4138 if (percpu_counter_compare(&sbinfo->used_blocks,
4139 ctx->blocks) > 0) {
4140 err = "Too small a size for current use";
4141 goto out;
4142 }
4143 }
4144 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4145 if (!sbinfo->max_inodes) {
4146 err = "Cannot retroactively limit inodes";
4147 goto out;
4148 }
4149 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4150 err = "Too few inodes for current use";
4151 goto out;
4152 }
4153 }
4154
4155 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4156 sbinfo->next_ino > UINT_MAX) {
4157 err = "Current inum too high to switch to 32-bit inums";
4158 goto out;
4159 }
4160 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4161 err = "Cannot disable swap on remount";
4162 goto out;
4163 }
4164 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4165 err = "Cannot enable swap on remount if it was disabled on first mount";
4166 goto out;
4167 }
4168
4169 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4170 !sb_any_quota_loaded(fc->root->d_sb)) {
4171 err = "Cannot enable quota on remount";
4172 goto out;
4173 }
4174
4175#ifdef CONFIG_TMPFS_QUOTA
4176#define CHANGED_LIMIT(name) \
4177 (ctx->qlimits.name## hardlimit && \
4178 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4179
4180 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4181 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4182 err = "Cannot change global quota limit on remount";
4183 goto out;
4184 }
4185#endif /* CONFIG_TMPFS_QUOTA */
4186
4187 if (ctx->seen & SHMEM_SEEN_HUGE)
4188 sbinfo->huge = ctx->huge;
4189 if (ctx->seen & SHMEM_SEEN_INUMS)
4190 sbinfo->full_inums = ctx->full_inums;
4191 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4192 sbinfo->max_blocks = ctx->blocks;
4193 if (ctx->seen & SHMEM_SEEN_INODES) {
4194 sbinfo->max_inodes = ctx->inodes;
4195 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4196 }
4197
4198 /*
4199 * Preserve previous mempolicy unless mpol remount option was specified.
4200 */
4201 if (ctx->mpol) {
4202 mpol = sbinfo->mpol;
4203 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4204 ctx->mpol = NULL;
4205 }
4206
4207 if (ctx->noswap)
4208 sbinfo->noswap = true;
4209
4210 raw_spin_unlock(&sbinfo->stat_lock);
4211 mpol_put(mpol);
4212 return 0;
4213out:
4214 raw_spin_unlock(&sbinfo->stat_lock);
4215 return invalfc(fc, "%s", err);
4216}
4217
4218static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4219{
4220 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4221 struct mempolicy *mpol;
4222
4223 if (sbinfo->max_blocks != shmem_default_max_blocks())
4224 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4225 if (sbinfo->max_inodes != shmem_default_max_inodes())
4226 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4227 if (sbinfo->mode != (0777 | S_ISVTX))
4228 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4229 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4230 seq_printf(seq, ",uid=%u",
4231 from_kuid_munged(&init_user_ns, sbinfo->uid));
4232 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4233 seq_printf(seq, ",gid=%u",
4234 from_kgid_munged(&init_user_ns, sbinfo->gid));
4235
4236 /*
4237 * Showing inode{64,32} might be useful even if it's the system default,
4238 * since then people don't have to resort to checking both here and
4239 * /proc/config.gz to confirm 64-bit inums were successfully applied
4240 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4241 *
4242 * We hide it when inode64 isn't the default and we are using 32-bit
4243 * inodes, since that probably just means the feature isn't even under
4244 * consideration.
4245 *
4246 * As such:
4247 *
4248	 *                    +-----------------+-----------------+
4249	 *                    | TMPFS_INODE64=y | TMPFS_INODE64=n |
4250	 * +------------------+-----------------+-----------------+
4251	 * | full_inums=true  | show            | show            |
4252	 * | full_inums=false | show            | hide            |
4253	 * +------------------+-----------------+-----------------+
4254 *
4255 */
4256 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4257 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4258#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4259 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4260 if (sbinfo->huge)
4261 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4262#endif
4263 mpol = shmem_get_sbmpol(sbinfo);
4264 shmem_show_mpol(seq, mpol);
4265 mpol_put(mpol);
4266 if (sbinfo->noswap)
4267 seq_printf(seq, ",noswap");
4268 return 0;
4269}
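/*
 * An illustrative /proc/mounts line produced from the options shown above:
 *
 *	tmpfs /mnt tmpfs rw,size=524288k,nr_inodes=1048576,inode64,huge=within_size 0 0
 */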
4270
4271#endif /* CONFIG_TMPFS */
4272
4273static void shmem_put_super(struct super_block *sb)
4274{
4275 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4276
4277#ifdef CONFIG_TMPFS_QUOTA
4278 shmem_disable_quotas(sb);
4279#endif
4280 free_percpu(sbinfo->ino_batch);
4281 percpu_counter_destroy(&sbinfo->used_blocks);
4282 mpol_put(sbinfo->mpol);
4283 kfree(sbinfo);
4284 sb->s_fs_info = NULL;
4285}
4286
4287static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4288{
4289 struct shmem_options *ctx = fc->fs_private;
4290 struct inode *inode;
4291 struct shmem_sb_info *sbinfo;
4292 int error = -ENOMEM;
4293
4294 /* Round up to L1_CACHE_BYTES to resist false sharing */
4295 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4296 L1_CACHE_BYTES), GFP_KERNEL);
4297 if (!sbinfo)
4298 return error;
4299
4300 sb->s_fs_info = sbinfo;
4301
4302#ifdef CONFIG_TMPFS
4303 /*
4304	 * By default we only allow half of the physical RAM per
4305 * tmpfs instance, limiting inodes to one per page of lowmem;
4306 * but the internal instance is left unlimited.
4307 */
4308 if (!(sb->s_flags & SB_KERNMOUNT)) {
4309 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4310 ctx->blocks = shmem_default_max_blocks();
4311 if (!(ctx->seen & SHMEM_SEEN_INODES))
4312 ctx->inodes = shmem_default_max_inodes();
4313 if (!(ctx->seen & SHMEM_SEEN_INUMS))
4314 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4315 sbinfo->noswap = ctx->noswap;
4316 } else {
4317 sb->s_flags |= SB_NOUSER;
4318 }
4319 sb->s_export_op = &shmem_export_ops;
4320 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4321#else
4322 sb->s_flags |= SB_NOUSER;
4323#endif
4324 sbinfo->max_blocks = ctx->blocks;
4325 sbinfo->max_inodes = ctx->inodes;
4326 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4327 if (sb->s_flags & SB_KERNMOUNT) {
4328 sbinfo->ino_batch = alloc_percpu(ino_t);
4329 if (!sbinfo->ino_batch)
4330 goto failed;
4331 }
4332 sbinfo->uid = ctx->uid;
4333 sbinfo->gid = ctx->gid;
4334 sbinfo->full_inums = ctx->full_inums;
4335 sbinfo->mode = ctx->mode;
4336 sbinfo->huge = ctx->huge;
4337 sbinfo->mpol = ctx->mpol;
4338 ctx->mpol = NULL;
4339
4340 raw_spin_lock_init(&sbinfo->stat_lock);
4341 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4342 goto failed;
4343 spin_lock_init(&sbinfo->shrinklist_lock);
4344 INIT_LIST_HEAD(&sbinfo->shrinklist);
4345
4346 sb->s_maxbytes = MAX_LFS_FILESIZE;
4347 sb->s_blocksize = PAGE_SIZE;
4348 sb->s_blocksize_bits = PAGE_SHIFT;
4349 sb->s_magic = TMPFS_MAGIC;
4350 sb->s_op = &shmem_ops;
4351 sb->s_time_gran = 1;
4352#ifdef CONFIG_TMPFS_XATTR
4353 sb->s_xattr = shmem_xattr_handlers;
4354#endif
4355#ifdef CONFIG_TMPFS_POSIX_ACL
4356 sb->s_flags |= SB_POSIXACL;
4357#endif
4358 uuid_gen(&sb->s_uuid);
4359
4360#ifdef CONFIG_TMPFS_QUOTA
4361 if (ctx->seen & SHMEM_SEEN_QUOTA) {
4362 sb->dq_op = &shmem_quota_operations;
4363 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4364 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4365
4366 /* Copy the default limits from ctx into sbinfo */
4367 memcpy(&sbinfo->qlimits, &ctx->qlimits,
4368 sizeof(struct shmem_quota_limits));
4369
4370 if (shmem_enable_quotas(sb, ctx->quota_types))
4371 goto failed;
4372 }
4373#endif /* CONFIG_TMPFS_QUOTA */
4374
4375 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4376 S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
4377 if (IS_ERR(inode)) {
4378 error = PTR_ERR(inode);
4379 goto failed;
4380 }
4381 inode->i_uid = sbinfo->uid;
4382 inode->i_gid = sbinfo->gid;
4383 sb->s_root = d_make_root(inode);
4384 if (!sb->s_root)
4385 goto failed;
4386 return 0;
4387
4388failed:
4389 shmem_put_super(sb);
4390 return error;
4391}
4392
4393static int shmem_get_tree(struct fs_context *fc)
4394{
4395 return get_tree_nodev(fc, shmem_fill_super);
4396}
4397
4398static void shmem_free_fc(struct fs_context *fc)
4399{
4400 struct shmem_options *ctx = fc->fs_private;
4401
4402 if (ctx) {
4403 mpol_put(ctx->mpol);
4404 kfree(ctx);
4405 }
4406}
4407
4408static const struct fs_context_operations shmem_fs_context_ops = {
4409 .free = shmem_free_fc,
4410 .get_tree = shmem_get_tree,
4411#ifdef CONFIG_TMPFS
4412 .parse_monolithic = shmem_parse_options,
4413 .parse_param = shmem_parse_one,
4414 .reconfigure = shmem_reconfigure,
4415#endif
4416};
4417
4418static struct kmem_cache *shmem_inode_cachep __ro_after_init;
4419
4420static struct inode *shmem_alloc_inode(struct super_block *sb)
4421{
4422 struct shmem_inode_info *info;
4423 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4424 if (!info)
4425 return NULL;
4426 return &info->vfs_inode;
4427}
4428
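/*
 * Symlinks shorter than SHORT_SYMLINK_LEN keep their target in a kmalloc'ed
 * inode->i_link rather than in a swappable page, so that buffer is freed
 * here when the in-core inode goes away.
 */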
4429static void shmem_free_in_core_inode(struct inode *inode)
4430{
4431 if (S_ISLNK(inode->i_mode))
4432 kfree(inode->i_link);
4433 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4434}
4435
4436static void shmem_destroy_inode(struct inode *inode)
4437{
4438 if (S_ISREG(inode->i_mode))
4439 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4440 if (S_ISDIR(inode->i_mode))
4441 simple_offset_destroy(shmem_get_offset_ctx(inode));
4442}
4443
4444static void shmem_init_inode(void *foo)
4445{
4446 struct shmem_inode_info *info = foo;
4447 inode_init_once(&info->vfs_inode);
4448}
4449
4450static void __init shmem_init_inodecache(void)
4451{
4452 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
4453 sizeof(struct shmem_inode_info),
4454 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
4455}
4456
4457static void __init shmem_destroy_inodecache(void)
4458{
4459 kmem_cache_destroy(shmem_inode_cachep);
4460}
4461
4462/* Keep the page in page cache instead of truncating it */
4463static int shmem_error_remove_folio(struct address_space *mapping,
4464 struct folio *folio)
4465{
4466 return 0;
4467}
4468
4469const struct address_space_operations shmem_aops = {
4470 .writepage = shmem_writepage,
4471 .dirty_folio = noop_dirty_folio,
4472#ifdef CONFIG_TMPFS
4473 .write_begin = shmem_write_begin,
4474 .write_end = shmem_write_end,
4475#endif
4476#ifdef CONFIG_MIGRATION
4477 .migrate_folio = migrate_folio,
4478#endif
4479 .error_remove_folio = shmem_error_remove_folio,
4480};
4481EXPORT_SYMBOL(shmem_aops);
4482
4483static const struct file_operations shmem_file_operations = {
4484 .mmap = shmem_mmap,
4485 .open = shmem_file_open,
4486 .get_unmapped_area = shmem_get_unmapped_area,
4487#ifdef CONFIG_TMPFS
4488 .llseek = shmem_file_llseek,
4489 .read_iter = shmem_file_read_iter,
4490 .write_iter = shmem_file_write_iter,
4491 .fsync = noop_fsync,
4492 .splice_read = shmem_file_splice_read,
4493 .splice_write = iter_file_splice_write,
4494 .fallocate = shmem_fallocate,
4495#endif
4496};
4497
4498static const struct inode_operations shmem_inode_operations = {
4499 .getattr = shmem_getattr,
4500 .setattr = shmem_setattr,
4501#ifdef CONFIG_TMPFS_XATTR
4502 .listxattr = shmem_listxattr,
4503 .set_acl = simple_set_acl,
4504 .fileattr_get = shmem_fileattr_get,
4505 .fileattr_set = shmem_fileattr_set,
4506#endif
4507};
4508
4509static const struct inode_operations shmem_dir_inode_operations = {
4510#ifdef CONFIG_TMPFS
4511 .getattr = shmem_getattr,
4512 .create = shmem_create,
4513 .lookup = simple_lookup,
4514 .link = shmem_link,
4515 .unlink = shmem_unlink,
4516 .symlink = shmem_symlink,
4517 .mkdir = shmem_mkdir,
4518 .rmdir = shmem_rmdir,
4519 .mknod = shmem_mknod,
4520 .rename = shmem_rename2,
4521 .tmpfile = shmem_tmpfile,
4522 .get_offset_ctx = shmem_get_offset_ctx,
4523#endif
4524#ifdef CONFIG_TMPFS_XATTR
4525 .listxattr = shmem_listxattr,
4526 .fileattr_get = shmem_fileattr_get,
4527 .fileattr_set = shmem_fileattr_set,
4528#endif
4529#ifdef CONFIG_TMPFS_POSIX_ACL
4530 .setattr = shmem_setattr,
4531 .set_acl = simple_set_acl,
4532#endif
4533};
4534
4535static const struct inode_operations shmem_special_inode_operations = {
4536 .getattr = shmem_getattr,
4537#ifdef CONFIG_TMPFS_XATTR
4538 .listxattr = shmem_listxattr,
4539#endif
4540#ifdef CONFIG_TMPFS_POSIX_ACL
4541 .setattr = shmem_setattr,
4542 .set_acl = simple_set_acl,
4543#endif
4544};
4545
4546static const struct super_operations shmem_ops = {
4547 .alloc_inode = shmem_alloc_inode,
4548 .free_inode = shmem_free_in_core_inode,
4549 .destroy_inode = shmem_destroy_inode,
4550#ifdef CONFIG_TMPFS
4551 .statfs = shmem_statfs,
4552 .show_options = shmem_show_options,
4553#endif
4554#ifdef CONFIG_TMPFS_QUOTA
4555 .get_dquots = shmem_get_dquots,
4556#endif
4557 .evict_inode = shmem_evict_inode,
4558 .drop_inode = generic_delete_inode,
4559 .put_super = shmem_put_super,
4560#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4561 .nr_cached_objects = shmem_unused_huge_count,
4562 .free_cached_objects = shmem_unused_huge_scan,
4563#endif
4564};
4565
4566static const struct vm_operations_struct shmem_vm_ops = {
4567 .fault = shmem_fault,
4568 .map_pages = filemap_map_pages,
4569#ifdef CONFIG_NUMA
4570 .set_policy = shmem_set_policy,
4571 .get_policy = shmem_get_policy,
4572#endif
4573};
4574
4575static const struct vm_operations_struct shmem_anon_vm_ops = {
4576 .fault = shmem_fault,
4577 .map_pages = filemap_map_pages,
4578#ifdef CONFIG_NUMA
4579 .set_policy = shmem_set_policy,
4580 .get_policy = shmem_get_policy,
4581#endif
4582};
4583
4584int shmem_init_fs_context(struct fs_context *fc)
4585{
4586 struct shmem_options *ctx;
4587
4588 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4589 if (!ctx)
4590 return -ENOMEM;
4591
4592 ctx->mode = 0777 | S_ISVTX;
4593 ctx->uid = current_fsuid();
4594 ctx->gid = current_fsgid();
4595
4596 fc->fs_private = ctx;
4597 fc->ops = &shmem_fs_context_ops;
4598 return 0;
4599}
4600
4601static struct file_system_type shmem_fs_type = {
4602 .owner = THIS_MODULE,
4603 .name = "tmpfs",
4604 .init_fs_context = shmem_init_fs_context,
4605#ifdef CONFIG_TMPFS
4606 .parameters = shmem_fs_parameters,
4607#endif
4608 .kill_sb = kill_litter_super,
4609 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
4610};
4611
4612void __init shmem_init(void)
4613{
4614 int error;
4615
4616 shmem_init_inodecache();
4617
4618#ifdef CONFIG_TMPFS_QUOTA
4619 error = register_quota_format(&shmem_quota_format);
4620 if (error < 0) {
4621 pr_err("Could not register quota format\n");
4622 goto out3;
4623 }
4624#endif
4625
4626 error = register_filesystem(&shmem_fs_type);
4627 if (error) {
4628 pr_err("Could not register tmpfs\n");
4629 goto out2;
4630 }
4631
4632 shm_mnt = kern_mount(&shmem_fs_type);
4633 if (IS_ERR(shm_mnt)) {
4634 error = PTR_ERR(shm_mnt);
4635 pr_err("Could not kern_mount tmpfs\n");
4636 goto out1;
4637 }
4638
4639#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4640 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4641 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4642 else
4643 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4644#endif
4645 return;
4646
4647out1:
4648 unregister_filesystem(&shmem_fs_type);
4649out2:
4650#ifdef CONFIG_TMPFS_QUOTA
4651 unregister_quota_format(&shmem_quota_format);
4652out3:
4653#endif
4654 shmem_destroy_inodecache();
4655 shm_mnt = ERR_PTR(error);
4656}
4657
4658#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4659static ssize_t shmem_enabled_show(struct kobject *kobj,
4660 struct kobj_attribute *attr, char *buf)
4661{
4662 static const int values[] = {
4663 SHMEM_HUGE_ALWAYS,
4664 SHMEM_HUGE_WITHIN_SIZE,
4665 SHMEM_HUGE_ADVISE,
4666 SHMEM_HUGE_NEVER,
4667 SHMEM_HUGE_DENY,
4668 SHMEM_HUGE_FORCE,
4669 };
4670 int len = 0;
4671 int i;
4672
4673 for (i = 0; i < ARRAY_SIZE(values); i++) {
4674 len += sysfs_emit_at(buf, len,
4675 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
4676 i ? " " : "", shmem_format_huge(values[i]));
4677 }
4678 len += sysfs_emit_at(buf, len, "\n");
4679
4680 return len;
4681}
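/*
 * Illustrative output: with shmem_huge == SHMEM_HUGE_NEVER the loop above
 * emits
 *
 *	always within_size advise [never] deny force
 */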
4682
4683static ssize_t shmem_enabled_store(struct kobject *kobj,
4684 struct kobj_attribute *attr, const char *buf, size_t count)
4685{
4686 char tmp[16];
4687 int huge;
4688
4689 if (count + 1 > sizeof(tmp))
4690 return -EINVAL;
4691 memcpy(tmp, buf, count);
4692 tmp[count] = '\0';
4693 if (count && tmp[count - 1] == '\n')
4694 tmp[count - 1] = '\0';
4695
4696 huge = shmem_parse_huge(tmp);
4697 if (huge == -EINVAL)
4698 return -EINVAL;
4699 if (!has_transparent_hugepage() &&
4700 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4701 return -EINVAL;
4702
4703 shmem_huge = huge;
4704 if (shmem_huge > SHMEM_HUGE_DENY)
4705 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4706 return count;
4707}
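/*
 * The store side accepts the same keywords, e.g. (illustrative):
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */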
4708
4709struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
4710#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
4711
4712#else /* !CONFIG_SHMEM */
4713
4714/*
4715 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4716 *
4717 * This is intended for small systems where the benefits of the full
4718 * shmem code (swap-backed and resource-limited) are outweighed by
4719 * its complexity. On systems without swap this code should be
4720 * effectively equivalent, but much lighter weight.
4721 */
4722
4723static struct file_system_type shmem_fs_type = {
4724 .name = "tmpfs",
4725 .init_fs_context = ramfs_init_fs_context,
4726 .parameters = ramfs_fs_parameters,
4727 .kill_sb = ramfs_kill_sb,
4728 .fs_flags = FS_USERNS_MOUNT,
4729};
4730
4731void __init shmem_init(void)
4732{
4733 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4734
4735 shm_mnt = kern_mount(&shmem_fs_type);
4736 BUG_ON(IS_ERR(shm_mnt));
4737}
4738
4739int shmem_unuse(unsigned int type)
4740{
4741 return 0;
4742}
4743
4744int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4745{
4746 return 0;
4747}
4748
4749void shmem_unlock_mapping(struct address_space *mapping)
4750{
4751}
4752
4753#ifdef CONFIG_MMU
4754unsigned long shmem_get_unmapped_area(struct file *file,
4755 unsigned long addr, unsigned long len,
4756 unsigned long pgoff, unsigned long flags)
4757{
4758 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4759}
4760#endif
4761
4762void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4763{
4764 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4765}
4766EXPORT_SYMBOL_GPL(shmem_truncate_range);
4767
4768#define shmem_vm_ops generic_file_vm_ops
4769#define shmem_anon_vm_ops generic_file_vm_ops
4770#define shmem_file_operations ramfs_file_operations
4771#define shmem_acct_size(flags, size) 0
4772#define shmem_unacct_size(flags, size) do {} while (0)
4773
4774static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
4775 struct super_block *sb, struct inode *dir,
4776 umode_t mode, dev_t dev, unsigned long flags)
4777{
4778 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
4779 return inode ? inode : ERR_PTR(-ENOSPC);
4780}
4781
4782#endif /* CONFIG_SHMEM */
4783
4784/* common code */
4785
4786static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
4787 loff_t size, unsigned long flags, unsigned int i_flags)
4788{
4789 struct inode *inode;
4790 struct file *res;
4791
4792 if (IS_ERR(mnt))
4793 return ERR_CAST(mnt);
4794
4795 if (size < 0 || size > MAX_LFS_FILESIZE)
4796 return ERR_PTR(-EINVAL);
4797
4798 if (shmem_acct_size(flags, size))
4799 return ERR_PTR(-ENOMEM);
4800
4801 if (is_idmapped_mnt(mnt))
4802 return ERR_PTR(-EINVAL);
4803
4804 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
4805 S_IFREG | S_IRWXUGO, 0, flags);
4806 if (IS_ERR(inode)) {
4807 shmem_unacct_size(flags, size);
4808 return ERR_CAST(inode);
4809 }
4810 inode->i_flags |= i_flags;
4811 inode->i_size = size;
4812 clear_nlink(inode); /* It is unlinked */
4813 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4814 if (!IS_ERR(res))
4815 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4816 &shmem_file_operations);
4817 if (IS_ERR(res))
4818 iput(inode);
4819 return res;
4820}
4821
4822/**
4823 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4824 * kernel internal. There will be NO LSM permission checks against the
4825 * underlying inode. So users of this interface must do LSM checks at a
4826 * higher layer. The users are the big_key and shm implementations. LSM
4827 * checks are provided at the key or shm level rather than the inode.
4828 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4829 * @size: size to be set for the file
4830 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4831 */
4832struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4833{
4834 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4835}
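/*
 * Minimal usage sketch (illustrative; the name and 1 MiB size are arbitrary):
 *
 *	struct file *file = shmem_kernel_file_setup("example", SZ_1M, 0);
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);
 */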
4836
4837/**
4838 * shmem_file_setup - get an unlinked file living in tmpfs
4839 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4840 * @size: size to be set for the file
4841 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4842 */
4843struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4844{
4845 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4846}
4847EXPORT_SYMBOL_GPL(shmem_file_setup);
4848
4849/**
4850 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4851 * @mnt: the tmpfs mount where the file will be created
4852 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4853 * @size: size to be set for the file
4854 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4855 */
4856struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4857 loff_t size, unsigned long flags)
4858{
4859 return __shmem_file_setup(mnt, name, size, flags, 0);
4860}
4861EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4862
4863/**
4864 * shmem_zero_setup - setup a shared anonymous mapping
4865 * @vma: the vma to be mmapped is prepared by do_mmap
4866 */
4867int shmem_zero_setup(struct vm_area_struct *vma)
4868{
4869 struct file *file;
4870 loff_t size = vma->vm_end - vma->vm_start;
4871
4872 /*
4873 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4874 * between XFS directory reading and selinux: since this file is only
4875 * accessible to the user through its mapping, use S_PRIVATE flag to
4876 * bypass file security, in the same way as shmem_kernel_file_setup().
4877 */
4878 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4879 if (IS_ERR(file))
4880 return PTR_ERR(file);
4881
4882 if (vma->vm_file)
4883 fput(vma->vm_file);
4884 vma->vm_file = file;
4885 vma->vm_ops = &shmem_anon_vm_ops;
4886
4887 return 0;
4888}
4889
4890/**
4891 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4892 * @mapping: the folio's address_space
4893 * @index: the folio index
4894 * @gfp: the page allocator flags to use if allocating
4895 *
4896 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4897 * with any new page allocations done using the specified allocation flags.
4898 * But read_cache_page_gfp() uses the ->read_folio() method, which does not
4899 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4900 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4901 *
4902 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4903 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4904 */
4905struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4906 pgoff_t index, gfp_t gfp)
4907{
4908#ifdef CONFIG_SHMEM
4909 struct inode *inode = mapping->host;
4910 struct folio *folio;
4911 int error;
4912
4913 BUG_ON(!shmem_mapping(mapping));
4914 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4915 gfp, NULL, NULL);
4916 if (error)
4917 return ERR_PTR(error);
4918
4919 folio_unlock(folio);
4920 return folio;
4921#else
4922 /*
4923 * The tiny !SHMEM case uses ramfs without swap
4924 */
4925 return mapping_read_folio_gfp(mapping, index, gfp);
4926#endif
4927}
4928EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
4929
4930struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4931 pgoff_t index, gfp_t gfp)
4932{
4933 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4934 struct page *page;
4935
4936 if (IS_ERR(folio))
4937 return &folio->page;
4938
4939 page = folio_file_page(folio, index);
4940 if (PageHWPoison(page)) {
4941 folio_put(folio);
4942 return ERR_PTR(-EIO);
4943 }
4944
4945 return page;
4946}
4947EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
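/*
 * Illustrative caller sketch, in the spirit of the i915 usage noted above:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */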
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 * 2000 Transmeta Corp.
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
8 * 2002 Red Hat Inc.
9 * Copyright (C) 2002-2011 Hugh Dickins.
10 * Copyright (C) 2011 Google Inc.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 *
14 * Extended attribute support for tmpfs:
15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17 *
18 * tiny-shmem:
19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 *
21 * This file is released under the GPL.
22 */
23
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/vfs.h>
27#include <linux/mount.h>
28#include <linux/pagemap.h>
29#include <linux/file.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/swap.h>
33
34static struct vfsmount *shm_mnt;
35
36#ifdef CONFIG_SHMEM
37/*
38 * This virtual memory filesystem is heavily based on the ramfs. It
39 * extends ramfs by the ability to use swap and honor resource limits
40 * which makes it a completely usable filesystem.
41 */
42
43#include <linux/xattr.h>
44#include <linux/exportfs.h>
45#include <linux/posix_acl.h>
46#include <linux/generic_acl.h>
47#include <linux/mman.h>
48#include <linux/string.h>
49#include <linux/slab.h>
50#include <linux/backing-dev.h>
51#include <linux/shmem_fs.h>
52#include <linux/writeback.h>
53#include <linux/blkdev.h>
54#include <linux/pagevec.h>
55#include <linux/percpu_counter.h>
56#include <linux/splice.h>
57#include <linux/security.h>
58#include <linux/swapops.h>
59#include <linux/mempolicy.h>
60#include <linux/namei.h>
61#include <linux/ctype.h>
62#include <linux/migrate.h>
63#include <linux/highmem.h>
64#include <linux/seq_file.h>
65#include <linux/magic.h>
66
67#include <asm/uaccess.h>
68#include <asm/pgtable.h>
69
70#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
71#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
72
73/* Pretend that each entry is of this size in directory's i_size */
74#define BOGO_DIRENT_SIZE 20
75
76/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
77#define SHORT_SYMLINK_LEN 128
78
79struct shmem_xattr {
80 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
81 char *name; /* xattr name */
82 size_t size;
83 char value[0];
84};
85
86/* Flag allocation requirements to shmem_getpage */
87enum sgp_type {
88 SGP_READ, /* don't exceed i_size, don't allocate page */
89 SGP_CACHE, /* don't exceed i_size, may allocate page */
90 SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
91 SGP_WRITE, /* may exceed i_size, may allocate page */
92};
93
94#ifdef CONFIG_TMPFS
95static unsigned long shmem_default_max_blocks(void)
96{
97 return totalram_pages / 2;
98}
99
100static unsigned long shmem_default_max_inodes(void)
101{
102 return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
103}
104#endif
105
106static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
107 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
108
109static inline int shmem_getpage(struct inode *inode, pgoff_t index,
110 struct page **pagep, enum sgp_type sgp, int *fault_type)
111{
112 return shmem_getpage_gfp(inode, index, pagep, sgp,
113 mapping_gfp_mask(inode->i_mapping), fault_type);
114}
115
116static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
117{
118 return sb->s_fs_info;
119}
120
121/*
122 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
123 * for shared memory and for shared anonymous (/dev/zero) mappings
124 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
125 * consistent with the pre-accounting of private mappings ...
126 */
127static inline int shmem_acct_size(unsigned long flags, loff_t size)
128{
129 return (flags & VM_NORESERVE) ?
130 0 : security_vm_enough_memory_kern(VM_ACCT(size));
131}
132
133static inline void shmem_unacct_size(unsigned long flags, loff_t size)
134{
135 if (!(flags & VM_NORESERVE))
136 vm_unacct_memory(VM_ACCT(size));
137}
138
139/*
140 * ... whereas tmpfs objects are accounted incrementally as
141 * pages are allocated, in order to allow huge sparse files.
142 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
143 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
144 */
145static inline int shmem_acct_block(unsigned long flags)
146{
147 return (flags & VM_NORESERVE) ?
148 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
149}
150
151static inline void shmem_unacct_blocks(unsigned long flags, long pages)
152{
153 if (flags & VM_NORESERVE)
154 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
155}
156
157static const struct super_operations shmem_ops;
158static const struct address_space_operations shmem_aops;
159static const struct file_operations shmem_file_operations;
160static const struct inode_operations shmem_inode_operations;
161static const struct inode_operations shmem_dir_inode_operations;
162static const struct inode_operations shmem_special_inode_operations;
163static const struct vm_operations_struct shmem_vm_ops;
164
165static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
166 .ra_pages = 0, /* No readahead */
167 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
168};
169
170static LIST_HEAD(shmem_swaplist);
171static DEFINE_MUTEX(shmem_swaplist_mutex);
172
173static int shmem_reserve_inode(struct super_block *sb)
174{
175 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
176 if (sbinfo->max_inodes) {
177 spin_lock(&sbinfo->stat_lock);
178 if (!sbinfo->free_inodes) {
179 spin_unlock(&sbinfo->stat_lock);
180 return -ENOSPC;
181 }
182 sbinfo->free_inodes--;
183 spin_unlock(&sbinfo->stat_lock);
184 }
185 return 0;
186}
187
188static void shmem_free_inode(struct super_block *sb)
189{
190 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
191 if (sbinfo->max_inodes) {
192 spin_lock(&sbinfo->stat_lock);
193 sbinfo->free_inodes++;
194 spin_unlock(&sbinfo->stat_lock);
195 }
196}
197
198/**
199 * shmem_recalc_inode - recalculate the block usage of an inode
200 * @inode: inode to recalc
201 *
202 * We have to calculate the free blocks since the mm can drop
203 * undirtied hole pages behind our back.
204 *
205 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
206 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
207 *
208 * It has to be called with the spinlock held.
209 */
210static void shmem_recalc_inode(struct inode *inode)
211{
212 struct shmem_inode_info *info = SHMEM_I(inode);
213 long freed;
214
215 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
216 if (freed > 0) {
217 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
218 if (sbinfo->max_blocks)
219 percpu_counter_add(&sbinfo->used_blocks, -freed);
220 info->alloced -= freed;
221 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
222 shmem_unacct_blocks(info->flags, freed);
223 }
224}
225
226/*
227 * Replace item expected in radix tree by a new item, while holding tree lock.
228 */
229static int shmem_radix_tree_replace(struct address_space *mapping,
230 pgoff_t index, void *expected, void *replacement)
231{
232 void **pslot;
233 void *item = NULL;
234
235 VM_BUG_ON(!expected);
236 pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
237 if (pslot)
238 item = radix_tree_deref_slot_protected(pslot,
239 &mapping->tree_lock);
240 if (item != expected)
241 return -ENOENT;
242 if (replacement)
243 radix_tree_replace_slot(pslot, replacement);
244 else
245 radix_tree_delete(&mapping->page_tree, index);
246 return 0;
247}
248
249/*
250 * Like add_to_page_cache_locked, but error if expected item has gone.
251 */
252static int shmem_add_to_page_cache(struct page *page,
253 struct address_space *mapping,
254 pgoff_t index, gfp_t gfp, void *expected)
255{
256 int error = 0;
257
258 VM_BUG_ON(!PageLocked(page));
259 VM_BUG_ON(!PageSwapBacked(page));
260
261 if (!expected)
262 error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
263 if (!error) {
264 page_cache_get(page);
265 page->mapping = mapping;
266 page->index = index;
267
268 spin_lock_irq(&mapping->tree_lock);
269 if (!expected)
270 error = radix_tree_insert(&mapping->page_tree,
271 index, page);
272 else
273 error = shmem_radix_tree_replace(mapping, index,
274 expected, page);
275 if (!error) {
276 mapping->nrpages++;
277 __inc_zone_page_state(page, NR_FILE_PAGES);
278 __inc_zone_page_state(page, NR_SHMEM);
279 spin_unlock_irq(&mapping->tree_lock);
280 } else {
281 page->mapping = NULL;
282 spin_unlock_irq(&mapping->tree_lock);
283 page_cache_release(page);
284 }
285 if (!expected)
286 radix_tree_preload_end();
287 }
288 if (error)
289 mem_cgroup_uncharge_cache_page(page);
290 return error;
291}
292
293/*
294 * Like delete_from_page_cache, but substitutes swap for page.
295 */
296static void shmem_delete_from_page_cache(struct page *page, void *radswap)
297{
298 struct address_space *mapping = page->mapping;
299 int error;
300
301 spin_lock_irq(&mapping->tree_lock);
302 error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
303 page->mapping = NULL;
304 mapping->nrpages--;
305 __dec_zone_page_state(page, NR_FILE_PAGES);
306 __dec_zone_page_state(page, NR_SHMEM);
307 spin_unlock_irq(&mapping->tree_lock);
308 page_cache_release(page);
309 BUG_ON(error);
310}
311
312/*
313 * Like find_get_pages, but collecting swap entries as well as pages.
314 */
315static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
316 pgoff_t start, unsigned int nr_pages,
317 struct page **pages, pgoff_t *indices)
318{
319 unsigned int i;
320 unsigned int ret;
321 unsigned int nr_found;
322
323 rcu_read_lock();
324restart:
325 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
326 (void ***)pages, indices, start, nr_pages);
327 ret = 0;
328 for (i = 0; i < nr_found; i++) {
329 struct page *page;
330repeat:
331 page = radix_tree_deref_slot((void **)pages[i]);
332 if (unlikely(!page))
333 continue;
334 if (radix_tree_exception(page)) {
335 if (radix_tree_deref_retry(page))
336 goto restart;
337 /*
338 * Otherwise, we must be storing a swap entry
339 * here as an exceptional entry: so return it
340 * without attempting to raise page count.
341 */
342 goto export;
343 }
344 if (!page_cache_get_speculative(page))
345 goto repeat;
346
347 /* Has the page moved? */
348 if (unlikely(page != *((void **)pages[i]))) {
349 page_cache_release(page);
350 goto repeat;
351 }
352export:
353 indices[ret] = indices[i];
354 pages[ret] = page;
355 ret++;
356 }
357 if (unlikely(!ret && nr_found))
358 goto restart;
359 rcu_read_unlock();
360 return ret;
361}
362
363/*
364 * Remove swap entry from radix tree, free the swap and its page cache.
365 */
366static int shmem_free_swap(struct address_space *mapping,
367 pgoff_t index, void *radswap)
368{
369 int error;
370
371 spin_lock_irq(&mapping->tree_lock);
372 error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
373 spin_unlock_irq(&mapping->tree_lock);
374 if (!error)
375 free_swap_and_cache(radix_to_swp_entry(radswap));
376 return error;
377}
378
379/*
380 * Pagevec may contain swap entries, so shuffle up pages before releasing.
381 */
382static void shmem_pagevec_release(struct pagevec *pvec)
383{
384 int i, j;
385
386 for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
387 struct page *page = pvec->pages[i];
388 if (!radix_tree_exceptional_entry(page))
389 pvec->pages[j++] = page;
390 }
391 pvec->nr = j;
392 pagevec_release(pvec);
393}
394
395/*
396 * Remove range of pages and swap entries from radix tree, and free them.
397 */
398void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
399{
400 struct address_space *mapping = inode->i_mapping;
401 struct shmem_inode_info *info = SHMEM_I(inode);
402 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
403 unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
404 pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
405 struct pagevec pvec;
406 pgoff_t indices[PAGEVEC_SIZE];
407 long nr_swaps_freed = 0;
408 pgoff_t index;
409 int i;
410
411 BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
412
413 pagevec_init(&pvec, 0);
414 index = start;
415 while (index <= end) {
416 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
417 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
418 pvec.pages, indices);
419 if (!pvec.nr)
420 break;
421 mem_cgroup_uncharge_start();
422 for (i = 0; i < pagevec_count(&pvec); i++) {
423 struct page *page = pvec.pages[i];
424
425 index = indices[i];
426 if (index > end)
427 break;
428
429 if (radix_tree_exceptional_entry(page)) {
430 nr_swaps_freed += !shmem_free_swap(mapping,
431 index, page);
432 continue;
433 }
434
435 if (!trylock_page(page))
436 continue;
437 if (page->mapping == mapping) {
438 VM_BUG_ON(PageWriteback(page));
439 truncate_inode_page(mapping, page);
440 }
441 unlock_page(page);
442 }
443 shmem_pagevec_release(&pvec);
444 mem_cgroup_uncharge_end();
445 cond_resched();
446 index++;
447 }
448
449 if (partial) {
450 struct page *page = NULL;
451 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
452 if (page) {
453 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
454 set_page_dirty(page);
455 unlock_page(page);
456 page_cache_release(page);
457 }
458 }
459
460 index = start;
461 for ( ; ; ) {
462 cond_resched();
463 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
464 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
465 pvec.pages, indices);
466 if (!pvec.nr) {
467 if (index == start)
468 break;
469 index = start;
470 continue;
471 }
472 if (index == start && indices[0] > end) {
473 shmem_pagevec_release(&pvec);
474 break;
475 }
476 mem_cgroup_uncharge_start();
477 for (i = 0; i < pagevec_count(&pvec); i++) {
478 struct page *page = pvec.pages[i];
479
480 index = indices[i];
481 if (index > end)
482 break;
483
484 if (radix_tree_exceptional_entry(page)) {
485 nr_swaps_freed += !shmem_free_swap(mapping,
486 index, page);
487 continue;
488 }
489
490 lock_page(page);
491 if (page->mapping == mapping) {
492 VM_BUG_ON(PageWriteback(page));
493 truncate_inode_page(mapping, page);
494 }
495 unlock_page(page);
496 }
497 shmem_pagevec_release(&pvec);
498 mem_cgroup_uncharge_end();
499 index++;
500 }
501
502 spin_lock(&info->lock);
503 info->swapped -= nr_swaps_freed;
504 shmem_recalc_inode(inode);
505 spin_unlock(&info->lock);
506
507 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
508}
509EXPORT_SYMBOL_GPL(shmem_truncate_range);
510
511static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
512{
513 struct inode *inode = dentry->d_inode;
514 int error;
515
516 error = inode_change_ok(inode, attr);
517 if (error)
518 return error;
519
520 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
521 loff_t oldsize = inode->i_size;
522 loff_t newsize = attr->ia_size;
523
524 if (newsize != oldsize) {
525 i_size_write(inode, newsize);
526 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
527 }
528 if (newsize < oldsize) {
529 loff_t holebegin = round_up(newsize, PAGE_SIZE);
530 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
531 shmem_truncate_range(inode, newsize, (loff_t)-1);
532 /* unmap again to remove racily COWed private pages */
533 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
534 }
535 }
536
537 setattr_copy(inode, attr);
538#ifdef CONFIG_TMPFS_POSIX_ACL
539 if (attr->ia_valid & ATTR_MODE)
540 error = generic_acl_chmod(inode);
541#endif
542 return error;
543}
544
545static void shmem_evict_inode(struct inode *inode)
546{
547 struct shmem_inode_info *info = SHMEM_I(inode);
548 struct shmem_xattr *xattr, *nxattr;
549
550 if (inode->i_mapping->a_ops == &shmem_aops) {
551 shmem_unacct_size(info->flags, inode->i_size);
552 inode->i_size = 0;
553 shmem_truncate_range(inode, 0, (loff_t)-1);
554 if (!list_empty(&info->swaplist)) {
555 mutex_lock(&shmem_swaplist_mutex);
556 list_del_init(&info->swaplist);
557 mutex_unlock(&shmem_swaplist_mutex);
558 }
559 } else
560 kfree(info->symlink);
561
562 list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
563 kfree(xattr->name);
564 kfree(xattr);
565 }
566 BUG_ON(inode->i_blocks);
567 shmem_free_inode(inode->i_sb);
568 end_writeback(inode);
569}
570
571/*
572 * If swap found in inode, free it and move page from swapcache to filecache.
573 */
574static int shmem_unuse_inode(struct shmem_inode_info *info,
575 swp_entry_t swap, struct page *page)
576{
577 struct address_space *mapping = info->vfs_inode.i_mapping;
578 void *radswap;
579 pgoff_t index;
580 int error;
581
582 radswap = swp_to_radix_entry(swap);
583 index = radix_tree_locate_item(&mapping->page_tree, radswap);
584 if (index == -1)
585 return 0;
586
587 /*
588 * Move _head_ to start search for next from here.
589 * But be careful: shmem_evict_inode checks list_empty without taking
590 * mutex, and there's an instant in list_move_tail when info->swaplist
591 * would appear empty, if it were the only one on shmem_swaplist.
592 */
593 if (shmem_swaplist.next != &info->swaplist)
594 list_move_tail(&shmem_swaplist, &info->swaplist);
595
596 /*
597 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
598 * but also to hold up shmem_evict_inode(): so inode cannot be freed
599 * beneath us (pagelock doesn't help until the page is in pagecache).
600 */
601 error = shmem_add_to_page_cache(page, mapping, index,
602 GFP_NOWAIT, radswap);
603 /* which does mem_cgroup_uncharge_cache_page on error */
604
605 if (error != -ENOMEM) {
606 /*
607 * Truncation and eviction use free_swap_and_cache(), which
608 * only does trylock page: if we raced, best clean up here.
609 */
610 delete_from_swap_cache(page);
611 set_page_dirty(page);
612 if (!error) {
613 spin_lock(&info->lock);
614 info->swapped--;
615 spin_unlock(&info->lock);
616 swap_free(swap);
617 }
618 error = 1; /* not an error, but entry was found */
619 }
620 return error;
621}
622
623/*
624 * Search through swapped inodes to find and replace swap by page.
625 */
626int shmem_unuse(swp_entry_t swap, struct page *page)
627{
628 struct list_head *this, *next;
629 struct shmem_inode_info *info;
630 int found = 0;
631 int error;
632
633 /*
634 * Charge page using GFP_KERNEL while we can wait, before taking
635 * the shmem_swaplist_mutex which might hold up shmem_writepage().
636 * Charged back to the user (not to caller) when swap account is used.
637 */
638 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
639 if (error)
640 goto out;
641 /* No radix_tree_preload: swap entry keeps a place for page in tree */
642
643 mutex_lock(&shmem_swaplist_mutex);
644 list_for_each_safe(this, next, &shmem_swaplist) {
645 info = list_entry(this, struct shmem_inode_info, swaplist);
646 if (info->swapped)
647 found = shmem_unuse_inode(info, swap, page);
648 else
649 list_del_init(&info->swaplist);
650 cond_resched();
651 if (found)
652 break;
653 }
654 mutex_unlock(&shmem_swaplist_mutex);
655
656 if (!found)
657 mem_cgroup_uncharge_cache_page(page);
658 if (found < 0)
659 error = found;
660out:
661 unlock_page(page);
662 page_cache_release(page);
663 return error;
664}
665
666/*
667 * Move the page from the page cache to the swap cache.
668 */
669static int shmem_writepage(struct page *page, struct writeback_control *wbc)
670{
671 struct shmem_inode_info *info;
672 struct address_space *mapping;
673 struct inode *inode;
674 swp_entry_t swap;
675 pgoff_t index;
676
677 BUG_ON(!PageLocked(page));
678 mapping = page->mapping;
679 index = page->index;
680 inode = mapping->host;
681 info = SHMEM_I(inode);
682 if (info->flags & VM_LOCKED)
683 goto redirty;
684 if (!total_swap_pages)
685 goto redirty;
686
687 /*
688 * shmem_backing_dev_info's capabilities prevent regular writeback or
689 * sync from ever calling shmem_writepage; but a stacking filesystem
690 * might use ->writepage of its underlying filesystem, in which case
691 * tmpfs should write out to swap only in response to memory pressure,
692 * and not for the writeback threads or sync.
693 */
694 if (!wbc->for_reclaim) {
695 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
696 goto redirty;
697 }
698 swap = get_swap_page();
699 if (!swap.val)
700 goto redirty;
701
702 /*
703 * Add inode to shmem_unuse()'s list of swapped-out inodes,
704 * if it's not already there. Do it now before the page is
705 * moved to swap cache, when its pagelock no longer protects
706 * the inode from eviction. But don't unlock the mutex until
707 * we've incremented swapped, because shmem_unuse_inode() will
708 * prune a !swapped inode from the swaplist under this mutex.
709 */
710 mutex_lock(&shmem_swaplist_mutex);
711 if (list_empty(&info->swaplist))
712 list_add_tail(&info->swaplist, &shmem_swaplist);
713
714 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
715 swap_shmem_alloc(swap);
716 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
717
718 spin_lock(&info->lock);
719 info->swapped++;
720 shmem_recalc_inode(inode);
721 spin_unlock(&info->lock);
722
723 mutex_unlock(&shmem_swaplist_mutex);
724 BUG_ON(page_mapped(page));
725 swap_writepage(page, wbc);
726 return 0;
727 }
728
729 mutex_unlock(&shmem_swaplist_mutex);
730 swapcache_free(swap, NULL);
731redirty:
732 set_page_dirty(page);
733 if (wbc->for_reclaim)
734 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
735 unlock_page(page);
736 return 0;
737}
738
739#ifdef CONFIG_NUMA
740#ifdef CONFIG_TMPFS
741static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
742{
743 char buffer[64];
744
745 if (!mpol || mpol->mode == MPOL_DEFAULT)
746 return; /* show nothing */
747
748 mpol_to_str(buffer, sizeof(buffer), mpol, 1);
749
750 seq_printf(seq, ",mpol=%s", buffer);
751}
752
753static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
754{
755 struct mempolicy *mpol = NULL;
756 if (sbinfo->mpol) {
757 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
758 mpol = sbinfo->mpol;
759 mpol_get(mpol);
760 spin_unlock(&sbinfo->stat_lock);
761 }
762 return mpol;
763}
764#endif /* CONFIG_TMPFS */
765
766static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
767 struct shmem_inode_info *info, pgoff_t index)
768{
769 struct mempolicy mpol, *spol;
770 struct vm_area_struct pvma;
771
772 spol = mpol_cond_copy(&mpol,
773 mpol_shared_policy_lookup(&info->policy, index));
774
775 /* Create a pseudo vma that just contains the policy */
776 pvma.vm_start = 0;
777 pvma.vm_pgoff = index;
778 pvma.vm_ops = NULL;
779 pvma.vm_policy = spol;
780 return swapin_readahead(swap, gfp, &pvma, 0);
781}
782
783static struct page *shmem_alloc_page(gfp_t gfp,
784 struct shmem_inode_info *info, pgoff_t index)
785{
786 struct vm_area_struct pvma;
787
788 /* Create a pseudo vma that just contains the policy */
789 pvma.vm_start = 0;
790 pvma.vm_pgoff = index;
791 pvma.vm_ops = NULL;
792 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
793
794 /*
795 * alloc_page_vma() will drop the shared policy reference
796 */
797 return alloc_page_vma(gfp, &pvma, 0);
798}
799#else /* !CONFIG_NUMA */
800#ifdef CONFIG_TMPFS
801static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
802{
803}
804#endif /* CONFIG_TMPFS */
805
806static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
807 struct shmem_inode_info *info, pgoff_t index)
808{
809 return swapin_readahead(swap, gfp, NULL, 0);
810}
811
812static inline struct page *shmem_alloc_page(gfp_t gfp,
813 struct shmem_inode_info *info, pgoff_t index)
814{
815 return alloc_page(gfp);
816}
817#endif /* CONFIG_NUMA */
818
819#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
820static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
821{
822 return NULL;
823}
824#endif
825
826/*
827 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
828 *
 829 * If we allocate a new one we do not mark it dirty: that's up to the
 830 * vm.  If we swap it in we mark it dirty, since we also free the swap
 831 * entry: a page cannot live in both the swap and page cache.
832 */
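/*
 * Note on the lookup below: find_lock_page() may return either a real
 * page or an "exceptional" radix-tree entry encoding a swap entry.  In
 * the swap case the data is read back via shmem_swapin() and the swap
 * entry in the radix tree is replaced by the page.  Races with a
 * concurrent instance doing the same work are handled by retrying:
 * -EEXIST restarts the lookup, and a first -ENOSPC retries once after
 * shmem_recalc_inode() has refreshed the block accounting.
 */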
833static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
834 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
835{
836 struct address_space *mapping = inode->i_mapping;
837 struct shmem_inode_info *info;
838 struct shmem_sb_info *sbinfo;
839 struct page *page;
840 swp_entry_t swap;
841 int error;
842 int once = 0;
843
844 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
845 return -EFBIG;
846repeat:
847 swap.val = 0;
848 page = find_lock_page(mapping, index);
849 if (radix_tree_exceptional_entry(page)) {
850 swap = radix_to_swp_entry(page);
851 page = NULL;
852 }
853
854 if (sgp != SGP_WRITE &&
855 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
856 error = -EINVAL;
857 goto failed;
858 }
859
860 if (page || (sgp == SGP_READ && !swap.val)) {
861 /*
862 * Once we can get the page lock, it must be uptodate:
863 * if there were an error in reading back from swap,
864 * the page would not be inserted into the filecache.
865 */
866 BUG_ON(page && !PageUptodate(page));
867 *pagep = page;
868 return 0;
869 }
870
871 /*
872 * Fast cache lookup did not find it:
873 * bring it back from swap or allocate.
874 */
875 info = SHMEM_I(inode);
876 sbinfo = SHMEM_SB(inode->i_sb);
877
878 if (swap.val) {
879 /* Look it up and read it in.. */
880 page = lookup_swap_cache(swap);
881 if (!page) {
882 /* here we actually do the io */
883 if (fault_type)
884 *fault_type |= VM_FAULT_MAJOR;
885 page = shmem_swapin(swap, gfp, info, index);
886 if (!page) {
887 error = -ENOMEM;
888 goto failed;
889 }
890 }
891
892 /* We have to do this with page locked to prevent races */
893 lock_page(page);
894 if (!PageUptodate(page)) {
895 error = -EIO;
896 goto failed;
897 }
898 wait_on_page_writeback(page);
899
900 /* Someone may have already done it for us */
901 if (page->mapping) {
902 if (page->mapping == mapping &&
903 page->index == index)
904 goto done;
905 error = -EEXIST;
906 goto failed;
907 }
908
909 error = mem_cgroup_cache_charge(page, current->mm,
910 gfp & GFP_RECLAIM_MASK);
911 if (!error)
912 error = shmem_add_to_page_cache(page, mapping, index,
913 gfp, swp_to_radix_entry(swap));
914 if (error)
915 goto failed;
916
917 spin_lock(&info->lock);
918 info->swapped--;
919 shmem_recalc_inode(inode);
920 spin_unlock(&info->lock);
921
922 delete_from_swap_cache(page);
923 set_page_dirty(page);
924 swap_free(swap);
925
926 } else {
927 if (shmem_acct_block(info->flags)) {
928 error = -ENOSPC;
929 goto failed;
930 }
931 if (sbinfo->max_blocks) {
932 if (percpu_counter_compare(&sbinfo->used_blocks,
933 sbinfo->max_blocks) >= 0) {
934 error = -ENOSPC;
935 goto unacct;
936 }
937 percpu_counter_inc(&sbinfo->used_blocks);
938 }
939
940 page = shmem_alloc_page(gfp, info, index);
941 if (!page) {
942 error = -ENOMEM;
943 goto decused;
944 }
945
946 SetPageSwapBacked(page);
947 __set_page_locked(page);
948 error = mem_cgroup_cache_charge(page, current->mm,
949 gfp & GFP_RECLAIM_MASK);
950 if (!error)
951 error = shmem_add_to_page_cache(page, mapping, index,
952 gfp, NULL);
953 if (error)
954 goto decused;
955 lru_cache_add_anon(page);
956
957 spin_lock(&info->lock);
958 info->alloced++;
959 inode->i_blocks += BLOCKS_PER_PAGE;
960 shmem_recalc_inode(inode);
961 spin_unlock(&info->lock);
962
963 clear_highpage(page);
964 flush_dcache_page(page);
965 SetPageUptodate(page);
966 if (sgp == SGP_DIRTY)
967 set_page_dirty(page);
968 }
969done:
970 /* Perhaps the file has been truncated since we checked */
971 if (sgp != SGP_WRITE &&
972 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
973 error = -EINVAL;
974 goto trunc;
975 }
976 *pagep = page;
977 return 0;
978
979 /*
980 * Error recovery.
981 */
982trunc:
983 ClearPageDirty(page);
984 delete_from_page_cache(page);
985 spin_lock(&info->lock);
986 info->alloced--;
987 inode->i_blocks -= BLOCKS_PER_PAGE;
988 spin_unlock(&info->lock);
989decused:
990 if (sbinfo->max_blocks)
991 percpu_counter_add(&sbinfo->used_blocks, -1);
992unacct:
993 shmem_unacct_blocks(info->flags, 1);
994failed:
995 if (swap.val && error != -EINVAL) {
996 struct page *test = find_get_page(mapping, index);
997 if (test && !radix_tree_exceptional_entry(test))
998 page_cache_release(test);
999 /* Have another try if the entry has changed */
1000 if (test != swp_to_radix_entry(swap))
1001 error = -EEXIST;
1002 }
1003 if (page) {
1004 unlock_page(page);
1005 page_cache_release(page);
1006 }
1007 if (error == -ENOSPC && !once++) {
1008 info = SHMEM_I(inode);
1009 spin_lock(&info->lock);
1010 shmem_recalc_inode(inode);
1011 spin_unlock(&info->lock);
1012 goto repeat;
1013 }
1014 if (error == -EEXIST)
1015 goto repeat;
1016 return error;
1017}
1018
1019static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1020{
1021 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1022 int error;
1023 int ret = VM_FAULT_LOCKED;
1024
1025 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1026 if (error)
1027 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1028
1029 if (ret & VM_FAULT_MAJOR) {
1030 count_vm_event(PGMAJFAULT);
1031 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1032 }
1033 return ret;
1034}
1035
1036#ifdef CONFIG_NUMA
1037static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1038{
1039 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1040 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1041}
1042
1043static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1044 unsigned long addr)
1045{
1046 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1047 pgoff_t index;
1048
1049 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1050 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1051}
1052#endif
1053
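/*
 * SHM_LOCK/SHM_UNLOCK support: charge the segment against the user's
 * RLIMIT_MEMLOCK via user_shm_lock(), and mark the whole mapping
 * unevictable so reclaim skips its pages; unlocking rescans the mapping
 * to put the pages back on the normal LRU lists.
 */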
1054int shmem_lock(struct file *file, int lock, struct user_struct *user)
1055{
1056 struct inode *inode = file->f_path.dentry->d_inode;
1057 struct shmem_inode_info *info = SHMEM_I(inode);
1058 int retval = -ENOMEM;
1059
1060 spin_lock(&info->lock);
1061 if (lock && !(info->flags & VM_LOCKED)) {
1062 if (!user_shm_lock(inode->i_size, user))
1063 goto out_nomem;
1064 info->flags |= VM_LOCKED;
1065 mapping_set_unevictable(file->f_mapping);
1066 }
1067 if (!lock && (info->flags & VM_LOCKED) && user) {
1068 user_shm_unlock(inode->i_size, user);
1069 info->flags &= ~VM_LOCKED;
1070 mapping_clear_unevictable(file->f_mapping);
1071 scan_mapping_unevictable_pages(file->f_mapping);
1072 }
1073 retval = 0;
1074
1075out_nomem:
1076 spin_unlock(&info->lock);
1077 return retval;
1078}
1079
1080static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1081{
1082 file_accessed(file);
1083 vma->vm_ops = &shmem_vm_ops;
1084 vma->vm_flags |= VM_CAN_NONLINEAR;
1085 return 0;
1086}
1087
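/*
 * Allocate and initialise a new shmem/tmpfs inode.  The inode is charged
 * against the superblock's inode limit via shmem_reserve_inode(); regular
 * files inherit the superblock's mempolicy as their initial shared policy.
 * Returns NULL if the limit is hit or the allocation fails.
 */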
1088static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1089 int mode, dev_t dev, unsigned long flags)
1090{
1091 struct inode *inode;
1092 struct shmem_inode_info *info;
1093 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1094
1095 if (shmem_reserve_inode(sb))
1096 return NULL;
1097
1098 inode = new_inode(sb);
1099 if (inode) {
1100 inode->i_ino = get_next_ino();
1101 inode_init_owner(inode, dir, mode);
1102 inode->i_blocks = 0;
1103 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1104 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1105 inode->i_generation = get_seconds();
1106 info = SHMEM_I(inode);
1107 memset(info, 0, (char *)inode - (char *)info);
1108 spin_lock_init(&info->lock);
1109 info->flags = flags & VM_NORESERVE;
1110 INIT_LIST_HEAD(&info->swaplist);
1111 INIT_LIST_HEAD(&info->xattr_list);
1112 cache_no_acl(inode);
1113
1114 switch (mode & S_IFMT) {
1115 default:
1116 inode->i_op = &shmem_special_inode_operations;
1117 init_special_inode(inode, mode, dev);
1118 break;
1119 case S_IFREG:
1120 inode->i_mapping->a_ops = &shmem_aops;
1121 inode->i_op = &shmem_inode_operations;
1122 inode->i_fop = &shmem_file_operations;
1123 mpol_shared_policy_init(&info->policy,
1124 shmem_get_sbmpol(sbinfo));
1125 break;
1126 case S_IFDIR:
1127 inc_nlink(inode);
1128 /* Some things misbehave if size == 0 on a directory */
1129 inode->i_size = 2 * BOGO_DIRENT_SIZE;
1130 inode->i_op = &shmem_dir_inode_operations;
1131 inode->i_fop = &simple_dir_operations;
1132 break;
1133 case S_IFLNK:
1134 /*
1135 * Must not load anything in the rbtree,
1136 * mpol_free_shared_policy will not be called.
1137 */
1138 mpol_shared_policy_init(&info->policy, NULL);
1139 break;
1140 }
1141 } else
1142 shmem_free_inode(sb);
1143 return inode;
1144}
1145
1146#ifdef CONFIG_TMPFS
1147static const struct inode_operations shmem_symlink_inode_operations;
1148static const struct inode_operations shmem_short_symlink_operations;
1149
1150static int
1151shmem_write_begin(struct file *file, struct address_space *mapping,
1152 loff_t pos, unsigned len, unsigned flags,
1153 struct page **pagep, void **fsdata)
1154{
1155 struct inode *inode = mapping->host;
1156 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1157 return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1158}
1159
1160static int
1161shmem_write_end(struct file *file, struct address_space *mapping,
1162 loff_t pos, unsigned len, unsigned copied,
1163 struct page *page, void *fsdata)
1164{
1165 struct inode *inode = mapping->host;
1166
1167 if (pos + copied > inode->i_size)
1168 i_size_write(inode, pos + copied);
1169
1170 set_page_dirty(page);
1171 unlock_page(page);
1172 page_cache_release(page);
1173
1174 return copied;
1175}
1176
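/*
 * Core of read(): walk the file page by page, copying to userspace via
 * the read actor.  With SGP_READ holes are not allocated -- shmem_getpage()
 * returns no page and the ZERO_PAGE is substituted below -- whereas the
 * stacking-filesystem case uses SGP_DIRTY so hole pages really are
 * allocated and dirtied.
 */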
1177static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1178{
1179 struct inode *inode = filp->f_path.dentry->d_inode;
1180 struct address_space *mapping = inode->i_mapping;
1181 pgoff_t index;
1182 unsigned long offset;
1183 enum sgp_type sgp = SGP_READ;
1184
1185 /*
1186 * Might this read be for a stacking filesystem? Then when reading
1187 * holes of a sparse file, we actually need to allocate those pages,
1188	 * and even mark them dirty, so such a read cannot exceed the max_blocks limit.
1189 */
1190 if (segment_eq(get_fs(), KERNEL_DS))
1191 sgp = SGP_DIRTY;
1192
1193 index = *ppos >> PAGE_CACHE_SHIFT;
1194 offset = *ppos & ~PAGE_CACHE_MASK;
1195
1196 for (;;) {
1197 struct page *page = NULL;
1198 pgoff_t end_index;
1199 unsigned long nr, ret;
1200 loff_t i_size = i_size_read(inode);
1201
1202 end_index = i_size >> PAGE_CACHE_SHIFT;
1203 if (index > end_index)
1204 break;
1205 if (index == end_index) {
1206 nr = i_size & ~PAGE_CACHE_MASK;
1207 if (nr <= offset)
1208 break;
1209 }
1210
1211 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1212 if (desc->error) {
1213 if (desc->error == -EINVAL)
1214 desc->error = 0;
1215 break;
1216 }
1217 if (page)
1218 unlock_page(page);
1219
1220 /*
1221 * We must evaluate after, since reads (unlike writes)
1222 * are called without i_mutex protection against truncate
1223 */
1224 nr = PAGE_CACHE_SIZE;
1225 i_size = i_size_read(inode);
1226 end_index = i_size >> PAGE_CACHE_SHIFT;
1227 if (index == end_index) {
1228 nr = i_size & ~PAGE_CACHE_MASK;
1229 if (nr <= offset) {
1230 if (page)
1231 page_cache_release(page);
1232 break;
1233 }
1234 }
1235 nr -= offset;
1236
1237 if (page) {
1238 /*
1239 * If users can be writing to this page using arbitrary
1240 * virtual addresses, take care about potential aliasing
1241 * before reading the page on the kernel side.
1242 */
1243 if (mapping_writably_mapped(mapping))
1244 flush_dcache_page(page);
1245 /*
1246 * Mark the page accessed if we read the beginning.
1247 */
1248 if (!offset)
1249 mark_page_accessed(page);
1250 } else {
1251 page = ZERO_PAGE(0);
1252 page_cache_get(page);
1253 }
1254
1255 /*
1256 * Ok, we have the page, and it's up-to-date, so
1257 * now we can copy it to user space...
1258 *
1259 * The actor routine returns how many bytes were actually used..
1260 * NOTE! This may not be the same as how much of a user buffer
1261 * we filled up (we may be padding etc), so we can only update
1262 * "pos" here (the actor routine has to update the user buffer
1263 * pointers and the remaining count).
1264 */
1265 ret = actor(desc, page, offset, nr);
1266 offset += ret;
1267 index += offset >> PAGE_CACHE_SHIFT;
1268 offset &= ~PAGE_CACHE_MASK;
1269
1270 page_cache_release(page);
1271 if (ret != nr || !desc->count)
1272 break;
1273
1274 cond_resched();
1275 }
1276
1277 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1278 file_accessed(filp);
1279}
1280
1281static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1282 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1283{
1284 struct file *filp = iocb->ki_filp;
1285 ssize_t retval;
1286 unsigned long seg;
1287 size_t count;
1288 loff_t *ppos = &iocb->ki_pos;
1289
1290 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1291 if (retval)
1292 return retval;
1293
1294 for (seg = 0; seg < nr_segs; seg++) {
1295 read_descriptor_t desc;
1296
1297 desc.written = 0;
1298 desc.arg.buf = iov[seg].iov_base;
1299 desc.count = iov[seg].iov_len;
1300 if (desc.count == 0)
1301 continue;
1302 desc.error = 0;
1303 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1304 retval += desc.written;
1305 if (desc.error) {
1306 retval = retval ?: desc.error;
1307 break;
1308 }
1309 if (desc.count > 0)
1310 break;
1311 }
1312 return retval;
1313}
1314
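/*
 * splice() support: fill the pipe straight from the page cache, using
 * shmem_getpage() to allocate or swap in any pages that are missing.
 * shmem_aops has no ->readpage(), so the generic page-cache splice
 * helpers cannot be used to bring absent pages uptodate here.
 */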
1315static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1316 struct pipe_inode_info *pipe, size_t len,
1317 unsigned int flags)
1318{
1319 struct address_space *mapping = in->f_mapping;
1320 struct inode *inode = mapping->host;
1321 unsigned int loff, nr_pages, req_pages;
1322 struct page *pages[PIPE_DEF_BUFFERS];
1323 struct partial_page partial[PIPE_DEF_BUFFERS];
1324 struct page *page;
1325 pgoff_t index, end_index;
1326 loff_t isize, left;
1327 int error, page_nr;
1328 struct splice_pipe_desc spd = {
1329 .pages = pages,
1330 .partial = partial,
1331 .flags = flags,
1332 .ops = &page_cache_pipe_buf_ops,
1333 .spd_release = spd_release_page,
1334 };
1335
1336 isize = i_size_read(inode);
1337 if (unlikely(*ppos >= isize))
1338 return 0;
1339
1340 left = isize - *ppos;
1341 if (unlikely(left < len))
1342 len = left;
1343
1344 if (splice_grow_spd(pipe, &spd))
1345 return -ENOMEM;
1346
1347 index = *ppos >> PAGE_CACHE_SHIFT;
1348 loff = *ppos & ~PAGE_CACHE_MASK;
1349 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1350 nr_pages = min(req_pages, pipe->buffers);
1351
1352 spd.nr_pages = find_get_pages_contig(mapping, index,
1353 nr_pages, spd.pages);
1354 index += spd.nr_pages;
1355 error = 0;
1356
1357 while (spd.nr_pages < nr_pages) {
1358 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1359 if (error)
1360 break;
1361 unlock_page(page);
1362 spd.pages[spd.nr_pages++] = page;
1363 index++;
1364 }
1365
1366 index = *ppos >> PAGE_CACHE_SHIFT;
1367 nr_pages = spd.nr_pages;
1368 spd.nr_pages = 0;
1369
1370 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1371 unsigned int this_len;
1372
1373 if (!len)
1374 break;
1375
1376 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1377 page = spd.pages[page_nr];
1378
1379 if (!PageUptodate(page) || page->mapping != mapping) {
1380 error = shmem_getpage(inode, index, &page,
1381 SGP_CACHE, NULL);
1382 if (error)
1383 break;
1384 unlock_page(page);
1385 page_cache_release(spd.pages[page_nr]);
1386 spd.pages[page_nr] = page;
1387 }
1388
1389 isize = i_size_read(inode);
1390 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1391 if (unlikely(!isize || index > end_index))
1392 break;
1393
1394 if (end_index == index) {
1395 unsigned int plen;
1396
1397 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1398 if (plen <= loff)
1399 break;
1400
1401 this_len = min(this_len, plen - loff);
1402 len = this_len;
1403 }
1404
1405 spd.partial[page_nr].offset = loff;
1406 spd.partial[page_nr].len = this_len;
1407 len -= this_len;
1408 loff = 0;
1409 spd.nr_pages++;
1410 index++;
1411 }
1412
1413 while (page_nr < nr_pages)
1414 page_cache_release(spd.pages[page_nr++]);
1415
1416 if (spd.nr_pages)
1417 error = splice_to_pipe(pipe, &spd);
1418
1419 splice_shrink_spd(pipe, &spd);
1420
1421 if (error > 0) {
1422 *ppos += error;
1423 file_accessed(in);
1424 }
1425 return error;
1426}
1427
1428static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1429{
1430 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1431
1432 buf->f_type = TMPFS_MAGIC;
1433 buf->f_bsize = PAGE_CACHE_SIZE;
1434 buf->f_namelen = NAME_MAX;
1435 if (sbinfo->max_blocks) {
1436 buf->f_blocks = sbinfo->max_blocks;
1437 buf->f_bavail =
1438 buf->f_bfree = sbinfo->max_blocks -
1439 percpu_counter_sum(&sbinfo->used_blocks);
1440 }
1441 if (sbinfo->max_inodes) {
1442 buf->f_files = sbinfo->max_inodes;
1443 buf->f_ffree = sbinfo->free_inodes;
1444 }
1445 /* else leave those fields 0 like simple_statfs */
1446 return 0;
1447}
1448
1449/*
1450 * File creation. Allocate an inode, and we're done..
1451 */
1452static int
1453shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1454{
1455 struct inode *inode;
1456 int error = -ENOSPC;
1457
1458 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1459 if (inode) {
1460 error = security_inode_init_security(inode, dir,
1461 &dentry->d_name, NULL,
1462 NULL, NULL);
1463 if (error) {
1464 if (error != -EOPNOTSUPP) {
1465 iput(inode);
1466 return error;
1467 }
1468 }
1469#ifdef CONFIG_TMPFS_POSIX_ACL
1470 error = generic_acl_init(inode, dir);
1471 if (error) {
1472 iput(inode);
1473 return error;
1474 }
1475#else
1476 error = 0;
1477#endif
1478 dir->i_size += BOGO_DIRENT_SIZE;
1479 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1480 d_instantiate(dentry, inode);
1481 dget(dentry); /* Extra count - pin the dentry in core */
1482 }
1483 return error;
1484}
1485
1486static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1487{
1488 int error;
1489
1490 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1491 return error;
1492 inc_nlink(dir);
1493 return 0;
1494}
1495
1496static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1497 struct nameidata *nd)
1498{
1499 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1500}
1501
1502/*
1503 * Link a file..
1504 */
1505static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1506{
1507 struct inode *inode = old_dentry->d_inode;
1508 int ret;
1509
1510 /*
1511 * No ordinary (disk based) filesystem counts links as inodes;
1512 * but each new link needs a new dentry, pinning lowmem, and
1513 * tmpfs dentries cannot be pruned until they are unlinked.
1514 */
1515 ret = shmem_reserve_inode(inode->i_sb);
1516 if (ret)
1517 goto out;
1518
1519 dir->i_size += BOGO_DIRENT_SIZE;
1520 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1521 inc_nlink(inode);
1522 ihold(inode); /* New dentry reference */
1523 dget(dentry); /* Extra pinning count for the created dentry */
1524 d_instantiate(dentry, inode);
1525out:
1526 return ret;
1527}
1528
1529static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1530{
1531 struct inode *inode = dentry->d_inode;
1532
1533 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1534 shmem_free_inode(inode->i_sb);
1535
1536 dir->i_size -= BOGO_DIRENT_SIZE;
1537 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1538 drop_nlink(inode);
1539 dput(dentry); /* Undo the count from "create" - this does all the work */
1540 return 0;
1541}
1542
1543static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1544{
1545 if (!simple_empty(dentry))
1546 return -ENOTEMPTY;
1547
1548 drop_nlink(dentry->d_inode);
1549 drop_nlink(dir);
1550 return shmem_unlink(dir, dentry);
1551}
1552
1553/*
1554 * The VFS layer already does all the dentry stuff for rename,
1555 * we just have to decrement the usage count for the target if
1556 * it exists so that the VFS layer correctly frees it when it
1557 * gets overwritten.
1558 */
1559static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1560{
1561 struct inode *inode = old_dentry->d_inode;
1562 int they_are_dirs = S_ISDIR(inode->i_mode);
1563
1564 if (!simple_empty(new_dentry))
1565 return -ENOTEMPTY;
1566
1567 if (new_dentry->d_inode) {
1568 (void) shmem_unlink(new_dir, new_dentry);
1569 if (they_are_dirs)
1570 drop_nlink(old_dir);
1571 } else if (they_are_dirs) {
1572 drop_nlink(old_dir);
1573 inc_nlink(new_dir);
1574 }
1575
1576 old_dir->i_size -= BOGO_DIRENT_SIZE;
1577 new_dir->i_size += BOGO_DIRENT_SIZE;
1578 old_dir->i_ctime = old_dir->i_mtime =
1579 new_dir->i_ctime = new_dir->i_mtime =
1580 inode->i_ctime = CURRENT_TIME;
1581 return 0;
1582}
1583
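/*
 * Short symlink targets (up to SHORT_SYMLINK_LEN bytes) are kmemdup'ed
 * into info->symlink and resolved by shmem_follow_short_symlink(); longer
 * targets are copied into page 0 of the symlink inode's mapping via
 * shmem_getpage(), and read back by shmem_follow_link()/shmem_put_link().
 */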
1584static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1585{
1586 int error;
1587 int len;
1588 struct inode *inode;
1589 struct page *page;
1590 char *kaddr;
1591 struct shmem_inode_info *info;
1592
1593 len = strlen(symname) + 1;
1594 if (len > PAGE_CACHE_SIZE)
1595 return -ENAMETOOLONG;
1596
1597 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1598 if (!inode)
1599 return -ENOSPC;
1600
1601 error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
1602 NULL, NULL);
1603 if (error) {
1604 if (error != -EOPNOTSUPP) {
1605 iput(inode);
1606 return error;
1607 }
1608 error = 0;
1609 }
1610
1611 info = SHMEM_I(inode);
1612 inode->i_size = len-1;
1613 if (len <= SHORT_SYMLINK_LEN) {
1614 info->symlink = kmemdup(symname, len, GFP_KERNEL);
1615 if (!info->symlink) {
1616 iput(inode);
1617 return -ENOMEM;
1618 }
1619 inode->i_op = &shmem_short_symlink_operations;
1620 } else {
1621 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1622 if (error) {
1623 iput(inode);
1624 return error;
1625 }
1626 inode->i_mapping->a_ops = &shmem_aops;
1627 inode->i_op = &shmem_symlink_inode_operations;
1628 kaddr = kmap_atomic(page, KM_USER0);
1629 memcpy(kaddr, symname, len);
1630 kunmap_atomic(kaddr, KM_USER0);
1631 set_page_dirty(page);
1632 unlock_page(page);
1633 page_cache_release(page);
1634 }
1635 dir->i_size += BOGO_DIRENT_SIZE;
1636 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1637 d_instantiate(dentry, inode);
1638 dget(dentry);
1639 return 0;
1640}
1641
1642static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
1643{
1644 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
1645 return NULL;
1646}
1647
1648static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1649{
1650 struct page *page = NULL;
1651 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1652 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
1653 if (page)
1654 unlock_page(page);
1655 return page;
1656}
1657
1658static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1659{
1660 if (!IS_ERR(nd_get_link(nd))) {
1661 struct page *page = cookie;
1662 kunmap(page);
1663 mark_page_accessed(page);
1664 page_cache_release(page);
1665 }
1666}
1667
1668#ifdef CONFIG_TMPFS_XATTR
1669/*
1670 * Superblocks without xattr inode operations may get some security.* xattr
1671 * support from the LSM "for free". As soon as we have any other xattrs
1672 * like ACLs, we also need to implement the security.* handlers at
1673 * filesystem level, though.
1674 */
1675
1676static int shmem_xattr_get(struct dentry *dentry, const char *name,
1677 void *buffer, size_t size)
1678{
1679 struct shmem_inode_info *info;
1680 struct shmem_xattr *xattr;
1681 int ret = -ENODATA;
1682
1683 info = SHMEM_I(dentry->d_inode);
1684
1685 spin_lock(&info->lock);
1686 list_for_each_entry(xattr, &info->xattr_list, list) {
1687 if (strcmp(name, xattr->name))
1688 continue;
1689
1690 ret = xattr->size;
1691 if (buffer) {
1692 if (size < xattr->size)
1693 ret = -ERANGE;
1694 else
1695 memcpy(buffer, xattr->value, xattr->size);
1696 }
1697 break;
1698 }
1699 spin_unlock(&info->lock);
1700 return ret;
1701}
1702
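/*
 * Set, replace or remove an xattr on the in-memory list.  value == NULL
 * removes the attribute; XATTR_CREATE fails with -EEXIST if the name is
 * already present, and XATTR_REPLACE fails with -ENODATA if it is not.
 * The new entry is allocated outside info->lock and linked in under it.
 */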
1703static int shmem_xattr_set(struct dentry *dentry, const char *name,
1704 const void *value, size_t size, int flags)
1705{
1706 struct inode *inode = dentry->d_inode;
1707 struct shmem_inode_info *info = SHMEM_I(inode);
1708 struct shmem_xattr *xattr;
1709 struct shmem_xattr *new_xattr = NULL;
1710 size_t len;
1711 int err = 0;
1712
1713 /* value == NULL means remove */
1714 if (value) {
1715 /* wrap around? */
1716 len = sizeof(*new_xattr) + size;
1717 if (len <= sizeof(*new_xattr))
1718 return -ENOMEM;
1719
1720 new_xattr = kmalloc(len, GFP_KERNEL);
1721 if (!new_xattr)
1722 return -ENOMEM;
1723
1724 new_xattr->name = kstrdup(name, GFP_KERNEL);
1725 if (!new_xattr->name) {
1726 kfree(new_xattr);
1727 return -ENOMEM;
1728 }
1729
1730 new_xattr->size = size;
1731 memcpy(new_xattr->value, value, size);
1732 }
1733
1734 spin_lock(&info->lock);
1735 list_for_each_entry(xattr, &info->xattr_list, list) {
1736 if (!strcmp(name, xattr->name)) {
1737 if (flags & XATTR_CREATE) {
1738 xattr = new_xattr;
1739 err = -EEXIST;
1740 } else if (new_xattr) {
1741 list_replace(&xattr->list, &new_xattr->list);
1742 } else {
1743 list_del(&xattr->list);
1744 }
1745 goto out;
1746 }
1747 }
1748 if (flags & XATTR_REPLACE) {
1749 xattr = new_xattr;
1750 err = -ENODATA;
1751 } else {
1752 list_add(&new_xattr->list, &info->xattr_list);
1753 xattr = NULL;
1754 }
1755out:
1756 spin_unlock(&info->lock);
1757 if (xattr)
1758 kfree(xattr->name);
1759 kfree(xattr);
1760 return err;
1761}
1762
1763static const struct xattr_handler *shmem_xattr_handlers[] = {
1764#ifdef CONFIG_TMPFS_POSIX_ACL
1765 &generic_acl_access_handler,
1766 &generic_acl_default_handler,
1767#endif
1768 NULL
1769};
1770
1771static int shmem_xattr_validate(const char *name)
1772{
1773 struct { const char *prefix; size_t len; } arr[] = {
1774 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1775 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1776 };
1777 int i;
1778
1779 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1780 size_t preflen = arr[i].len;
1781 if (strncmp(name, arr[i].prefix, preflen) == 0) {
1782 if (!name[preflen])
1783 return -EINVAL;
1784 return 0;
1785 }
1786 }
1787 return -EOPNOTSUPP;
1788}
1789
1790static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1791 void *buffer, size_t size)
1792{
1793 int err;
1794
1795 /*
1796 * If this is a request for a synthetic attribute in the system.*
1797 * namespace use the generic infrastructure to resolve a handler
1798 * for it via sb->s_xattr.
1799 */
1800 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1801 return generic_getxattr(dentry, name, buffer, size);
1802
1803 err = shmem_xattr_validate(name);
1804 if (err)
1805 return err;
1806
1807 return shmem_xattr_get(dentry, name, buffer, size);
1808}
1809
1810static int shmem_setxattr(struct dentry *dentry, const char *name,
1811 const void *value, size_t size, int flags)
1812{
1813 int err;
1814
1815 /*
1816 * If this is a request for a synthetic attribute in the system.*
1817 * namespace use the generic infrastructure to resolve a handler
1818 * for it via sb->s_xattr.
1819 */
1820 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1821 return generic_setxattr(dentry, name, value, size, flags);
1822
1823 err = shmem_xattr_validate(name);
1824 if (err)
1825 return err;
1826
1827 if (size == 0)
1828 value = ""; /* empty EA, do not remove */
1829
1830 return shmem_xattr_set(dentry, name, value, size, flags);
1831
1832}
1833
1834static int shmem_removexattr(struct dentry *dentry, const char *name)
1835{
1836 int err;
1837
1838 /*
1839 * If this is a request for a synthetic attribute in the system.*
1840 * namespace use the generic infrastructure to resolve a handler
1841 * for it via sb->s_xattr.
1842 */
1843 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1844 return generic_removexattr(dentry, name);
1845
1846 err = shmem_xattr_validate(name);
1847 if (err)
1848 return err;
1849
1850 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
1851}
1852
1853static bool xattr_is_trusted(const char *name)
1854{
1855 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
1856}
1857
1858static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
1859{
1860 bool trusted = capable(CAP_SYS_ADMIN);
1861 struct shmem_xattr *xattr;
1862 struct shmem_inode_info *info;
1863 size_t used = 0;
1864
1865 info = SHMEM_I(dentry->d_inode);
1866
1867 spin_lock(&info->lock);
1868 list_for_each_entry(xattr, &info->xattr_list, list) {
1869 size_t len;
1870
1871 /* skip "trusted." attributes for unprivileged callers */
1872 if (!trusted && xattr_is_trusted(xattr->name))
1873 continue;
1874
1875 len = strlen(xattr->name) + 1;
1876 used += len;
1877 if (buffer) {
1878 if (size < used) {
1879 used = -ERANGE;
1880 break;
1881 }
1882 memcpy(buffer, xattr->name, len);
1883 buffer += len;
1884 }
1885 }
1886 spin_unlock(&info->lock);
1887
1888 return used;
1889}
1890#endif /* CONFIG_TMPFS_XATTR */
1891
1892static const struct inode_operations shmem_short_symlink_operations = {
1893 .readlink = generic_readlink,
1894 .follow_link = shmem_follow_short_symlink,
1895#ifdef CONFIG_TMPFS_XATTR
1896 .setxattr = shmem_setxattr,
1897 .getxattr = shmem_getxattr,
1898 .listxattr = shmem_listxattr,
1899 .removexattr = shmem_removexattr,
1900#endif
1901};
1902
1903static const struct inode_operations shmem_symlink_inode_operations = {
1904 .readlink = generic_readlink,
1905 .follow_link = shmem_follow_link,
1906 .put_link = shmem_put_link,
1907#ifdef CONFIG_TMPFS_XATTR
1908 .setxattr = shmem_setxattr,
1909 .getxattr = shmem_getxattr,
1910 .listxattr = shmem_listxattr,
1911 .removexattr = shmem_removexattr,
1912#endif
1913};
1914
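/*
 * NFS export support.  A tmpfs file handle is three 32-bit words:
 * fh[0] is the inode's i_generation, fh[1] the low and fh[2] the high
 * 32 bits of i_ino.  Inodes are hashed lazily in shmem_encode_fh() so
 * that shmem_fh_to_dentry() can find them again with ilookup5().
 */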
1915static struct dentry *shmem_get_parent(struct dentry *child)
1916{
1917 return ERR_PTR(-ESTALE);
1918}
1919
1920static int shmem_match(struct inode *ino, void *vfh)
1921{
1922 __u32 *fh = vfh;
1923 __u64 inum = fh[2];
1924 inum = (inum << 32) | fh[1];
1925 return ino->i_ino == inum && fh[0] == ino->i_generation;
1926}
1927
1928static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1929 struct fid *fid, int fh_len, int fh_type)
1930{
1931 struct inode *inode;
1932 struct dentry *dentry = NULL;
1933 u64 inum = fid->raw[2];
1934 inum = (inum << 32) | fid->raw[1];
1935
1936 if (fh_len < 3)
1937 return NULL;
1938
1939 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1940 shmem_match, fid->raw);
1941 if (inode) {
1942 dentry = d_find_alias(inode);
1943 iput(inode);
1944 }
1945
1946 return dentry;
1947}
1948
1949static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
1950 int connectable)
1951{
1952 struct inode *inode = dentry->d_inode;
1953
1954 if (*len < 3) {
1955 *len = 3;
1956 return 255;
1957 }
1958
1959 if (inode_unhashed(inode)) {
1960 /* Unfortunately insert_inode_hash is not idempotent,
1961 * so as we hash inodes here rather than at creation
1962 * time, we need a lock to ensure we only try
1963 * to do it once
1964 */
1965 static DEFINE_SPINLOCK(lock);
1966 spin_lock(&lock);
1967 if (inode_unhashed(inode))
1968 __insert_inode_hash(inode,
1969 inode->i_ino + inode->i_generation);
1970 spin_unlock(&lock);
1971 }
1972
1973 fh[0] = inode->i_generation;
1974 fh[1] = inode->i_ino;
1975 fh[2] = ((__u64)inode->i_ino) >> 32;
1976
1977 *len = 3;
1978 return 1;
1979}
1980
1981static const struct export_operations shmem_export_ops = {
1982 .get_parent = shmem_get_parent,
1983 .encode_fh = shmem_encode_fh,
1984 .fh_to_dentry = shmem_fh_to_dentry,
1985};
1986
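/*
 * Parse tmpfs mount options.  "size" accepts k/m/g suffixes via memparse()
 * or a percentage of total RAM (e.g. size=50%); "nr_blocks" and "nr_inodes"
 * set the limits directly; "mode", "uid" and "gid" apply to the root inode
 * and are ignored on remount; "mpol" takes a NUMA policy string.
 * Typical use: mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt
 */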
1987static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
1988 bool remount)
1989{
1990 char *this_char, *value, *rest;
1991
1992 while (options != NULL) {
1993 this_char = options;
1994 for (;;) {
1995 /*
1996 * NUL-terminate this option: unfortunately,
1997 * mount options form a comma-separated list,
1998 * but mpol's nodelist may also contain commas.
1999 */
2000 options = strchr(options, ',');
2001 if (options == NULL)
2002 break;
2003 options++;
2004 if (!isdigit(*options)) {
2005 options[-1] = '\0';
2006 break;
2007 }
2008 }
2009 if (!*this_char)
2010 continue;
2011 if ((value = strchr(this_char,'=')) != NULL) {
2012 *value++ = 0;
2013 } else {
2014 printk(KERN_ERR
2015 "tmpfs: No value for mount option '%s'\n",
2016 this_char);
2017 return 1;
2018 }
2019
2020 if (!strcmp(this_char,"size")) {
2021 unsigned long long size;
2022 size = memparse(value,&rest);
2023 if (*rest == '%') {
2024 size <<= PAGE_SHIFT;
2025 size *= totalram_pages;
2026 do_div(size, 100);
2027 rest++;
2028 }
2029 if (*rest)
2030 goto bad_val;
2031 sbinfo->max_blocks =
2032 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2033 } else if (!strcmp(this_char,"nr_blocks")) {
2034 sbinfo->max_blocks = memparse(value, &rest);
2035 if (*rest)
2036 goto bad_val;
2037 } else if (!strcmp(this_char,"nr_inodes")) {
2038 sbinfo->max_inodes = memparse(value, &rest);
2039 if (*rest)
2040 goto bad_val;
2041 } else if (!strcmp(this_char,"mode")) {
2042 if (remount)
2043 continue;
2044 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2045 if (*rest)
2046 goto bad_val;
2047 } else if (!strcmp(this_char,"uid")) {
2048 if (remount)
2049 continue;
2050 sbinfo->uid = simple_strtoul(value, &rest, 0);
2051 if (*rest)
2052 goto bad_val;
2053 } else if (!strcmp(this_char,"gid")) {
2054 if (remount)
2055 continue;
2056 sbinfo->gid = simple_strtoul(value, &rest, 0);
2057 if (*rest)
2058 goto bad_val;
2059 } else if (!strcmp(this_char,"mpol")) {
2060 if (mpol_parse_str(value, &sbinfo->mpol, 1))
2061 goto bad_val;
2062 } else {
2063 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2064 this_char);
2065 return 1;
2066 }
2067 }
2068 return 0;
2069
2070bad_val:
2071 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2072 value, this_char);
2073 return 1;
2074
2075}
2076
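/*
 * Remount can change the block/inode limits and the mempolicy; mode, uid
 * and gid are ignored here (see shmem_parse_options).  Shrinking a limit
 * below current usage fails with -EINVAL, as does switching an unlimited
 * instance to a limited one, since usage was never tracked.
 */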
2077static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2078{
2079 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2080 struct shmem_sb_info config = *sbinfo;
2081 unsigned long inodes;
2082 int error = -EINVAL;
2083
2084 if (shmem_parse_options(data, &config, true))
2085 return error;
2086
2087 spin_lock(&sbinfo->stat_lock);
2088 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2089 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2090 goto out;
2091 if (config.max_inodes < inodes)
2092 goto out;
2093 /*
2094 * Those tests disallow limited->unlimited while any are in use;
2095 * but we must separately disallow unlimited->limited, because
2096 * in that case we have no record of how much is already in use.
2097 */
2098 if (config.max_blocks && !sbinfo->max_blocks)
2099 goto out;
2100 if (config.max_inodes && !sbinfo->max_inodes)
2101 goto out;
2102
2103 error = 0;
2104 sbinfo->max_blocks = config.max_blocks;
2105 sbinfo->max_inodes = config.max_inodes;
2106 sbinfo->free_inodes = config.max_inodes - inodes;
2107
2108 mpol_put(sbinfo->mpol);
2109 sbinfo->mpol = config.mpol; /* transfers initial ref */
2110out:
2111 spin_unlock(&sbinfo->stat_lock);
2112 return error;
2113}
2114
2115static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2116{
2117 struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2118
2119 if (sbinfo->max_blocks != shmem_default_max_blocks())
2120 seq_printf(seq, ",size=%luk",
2121 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2122 if (sbinfo->max_inodes != shmem_default_max_inodes())
2123 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2124 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2125 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2126 if (sbinfo->uid != 0)
2127 seq_printf(seq, ",uid=%u", sbinfo->uid);
2128 if (sbinfo->gid != 0)
2129 seq_printf(seq, ",gid=%u", sbinfo->gid);
2130 shmem_show_mpol(seq, sbinfo->mpol);
2131 return 0;
2132}
2133#endif /* CONFIG_TMPFS */
2134
2135static void shmem_put_super(struct super_block *sb)
2136{
2137 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2138
2139 percpu_counter_destroy(&sbinfo->used_blocks);
2140 kfree(sbinfo);
2141 sb->s_fs_info = NULL;
2142}
2143
2144int shmem_fill_super(struct super_block *sb, void *data, int silent)
2145{
2146 struct inode *inode;
2147 struct dentry *root;
2148 struct shmem_sb_info *sbinfo;
2149 int err = -ENOMEM;
2150
2151 /* Round up to L1_CACHE_BYTES to resist false sharing */
2152 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2153 L1_CACHE_BYTES), GFP_KERNEL);
2154 if (!sbinfo)
2155 return -ENOMEM;
2156
2157 sbinfo->mode = S_IRWXUGO | S_ISVTX;
2158 sbinfo->uid = current_fsuid();
2159 sbinfo->gid = current_fsgid();
2160 sb->s_fs_info = sbinfo;
2161
2162#ifdef CONFIG_TMPFS
2163 /*
2164 * By default we only allow half of the physical RAM per
2165 * tmpfs instance, limiting inodes to one per page of lowmem;
2166 * but the internal instance is left unlimited.
2167 */
2168 if (!(sb->s_flags & MS_NOUSER)) {
2169 sbinfo->max_blocks = shmem_default_max_blocks();
2170 sbinfo->max_inodes = shmem_default_max_inodes();
2171 if (shmem_parse_options(data, sbinfo, false)) {
2172 err = -EINVAL;
2173 goto failed;
2174 }
2175 }
2176 sb->s_export_op = &shmem_export_ops;
2177#else
2178 sb->s_flags |= MS_NOUSER;
2179#endif
2180
2181 spin_lock_init(&sbinfo->stat_lock);
2182 if (percpu_counter_init(&sbinfo->used_blocks, 0))
2183 goto failed;
2184 sbinfo->free_inodes = sbinfo->max_inodes;
2185
2186 sb->s_maxbytes = MAX_LFS_FILESIZE;
2187 sb->s_blocksize = PAGE_CACHE_SIZE;
2188 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2189 sb->s_magic = TMPFS_MAGIC;
2190 sb->s_op = &shmem_ops;
2191 sb->s_time_gran = 1;
2192#ifdef CONFIG_TMPFS_XATTR
2193 sb->s_xattr = shmem_xattr_handlers;
2194#endif
2195#ifdef CONFIG_TMPFS_POSIX_ACL
2196 sb->s_flags |= MS_POSIXACL;
2197#endif
2198
2199 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2200 if (!inode)
2201 goto failed;
2202 inode->i_uid = sbinfo->uid;
2203 inode->i_gid = sbinfo->gid;
2204 root = d_alloc_root(inode);
2205 if (!root)
2206 goto failed_iput;
2207 sb->s_root = root;
2208 return 0;
2209
2210failed_iput:
2211 iput(inode);
2212failed:
2213 shmem_put_super(sb);
2214 return err;
2215}
2216
2217static struct kmem_cache *shmem_inode_cachep;
2218
2219static struct inode *shmem_alloc_inode(struct super_block *sb)
2220{
2221 struct shmem_inode_info *info;
2222 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2223 if (!info)
2224 return NULL;
2225 return &info->vfs_inode;
2226}
2227
2228static void shmem_destroy_callback(struct rcu_head *head)
2229{
2230 struct inode *inode = container_of(head, struct inode, i_rcu);
2231 INIT_LIST_HEAD(&inode->i_dentry);
2232 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2233}
2234
2235static void shmem_destroy_inode(struct inode *inode)
2236{
2237 if ((inode->i_mode & S_IFMT) == S_IFREG)
2238 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2239 call_rcu(&inode->i_rcu, shmem_destroy_callback);
2240}
2241
2242static void shmem_init_inode(void *foo)
2243{
2244 struct shmem_inode_info *info = foo;
2245 inode_init_once(&info->vfs_inode);
2246}
2247
2248static int shmem_init_inodecache(void)
2249{
2250 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2251 sizeof(struct shmem_inode_info),
2252 0, SLAB_PANIC, shmem_init_inode);
2253 return 0;
2254}
2255
2256static void shmem_destroy_inodecache(void)
2257{
2258 kmem_cache_destroy(shmem_inode_cachep);
2259}
2260
2261static const struct address_space_operations shmem_aops = {
2262 .writepage = shmem_writepage,
2263 .set_page_dirty = __set_page_dirty_no_writeback,
2264#ifdef CONFIG_TMPFS
2265 .write_begin = shmem_write_begin,
2266 .write_end = shmem_write_end,
2267#endif
2268 .migratepage = migrate_page,
2269 .error_remove_page = generic_error_remove_page,
2270};
2271
2272static const struct file_operations shmem_file_operations = {
2273 .mmap = shmem_mmap,
2274#ifdef CONFIG_TMPFS
2275 .llseek = generic_file_llseek,
2276 .read = do_sync_read,
2277 .write = do_sync_write,
2278 .aio_read = shmem_file_aio_read,
2279 .aio_write = generic_file_aio_write,
2280 .fsync = noop_fsync,
2281 .splice_read = shmem_file_splice_read,
2282 .splice_write = generic_file_splice_write,
2283#endif
2284};
2285
2286static const struct inode_operations shmem_inode_operations = {
2287 .setattr = shmem_setattr,
2288 .truncate_range = shmem_truncate_range,
2289#ifdef CONFIG_TMPFS_XATTR
2290 .setxattr = shmem_setxattr,
2291 .getxattr = shmem_getxattr,
2292 .listxattr = shmem_listxattr,
2293 .removexattr = shmem_removexattr,
2294#endif
2295};
2296
2297static const struct inode_operations shmem_dir_inode_operations = {
2298#ifdef CONFIG_TMPFS
2299 .create = shmem_create,
2300 .lookup = simple_lookup,
2301 .link = shmem_link,
2302 .unlink = shmem_unlink,
2303 .symlink = shmem_symlink,
2304 .mkdir = shmem_mkdir,
2305 .rmdir = shmem_rmdir,
2306 .mknod = shmem_mknod,
2307 .rename = shmem_rename,
2308#endif
2309#ifdef CONFIG_TMPFS_XATTR
2310 .setxattr = shmem_setxattr,
2311 .getxattr = shmem_getxattr,
2312 .listxattr = shmem_listxattr,
2313 .removexattr = shmem_removexattr,
2314#endif
2315#ifdef CONFIG_TMPFS_POSIX_ACL
2316 .setattr = shmem_setattr,
2317#endif
2318};
2319
2320static const struct inode_operations shmem_special_inode_operations = {
2321#ifdef CONFIG_TMPFS_XATTR
2322 .setxattr = shmem_setxattr,
2323 .getxattr = shmem_getxattr,
2324 .listxattr = shmem_listxattr,
2325 .removexattr = shmem_removexattr,
2326#endif
2327#ifdef CONFIG_TMPFS_POSIX_ACL
2328 .setattr = shmem_setattr,
2329#endif
2330};
2331
2332static const struct super_operations shmem_ops = {
2333 .alloc_inode = shmem_alloc_inode,
2334 .destroy_inode = shmem_destroy_inode,
2335#ifdef CONFIG_TMPFS
2336 .statfs = shmem_statfs,
2337 .remount_fs = shmem_remount_fs,
2338 .show_options = shmem_show_options,
2339#endif
2340 .evict_inode = shmem_evict_inode,
2341 .drop_inode = generic_delete_inode,
2342 .put_super = shmem_put_super,
2343};
2344
2345static const struct vm_operations_struct shmem_vm_ops = {
2346 .fault = shmem_fault,
2347#ifdef CONFIG_NUMA
2348 .set_policy = shmem_set_policy,
2349 .get_policy = shmem_get_policy,
2350#endif
2351};
2352
2353static struct dentry *shmem_mount(struct file_system_type *fs_type,
2354 int flags, const char *dev_name, void *data)
2355{
2356 return mount_nodev(fs_type, flags, data, shmem_fill_super);
2357}
2358
2359static struct file_system_type shmem_fs_type = {
2360 .owner = THIS_MODULE,
2361 .name = "tmpfs",
2362 .mount = shmem_mount,
2363 .kill_sb = kill_litter_super,
2364};
2365
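/*
 * Boot-time setup: initialise the backing_dev_info and the inode cache,
 * register the "tmpfs" filesystem type, and create the internal MS_NOUSER
 * mount that shmem_file_setup() hands files out from.  Each step is
 * unwound if a later one fails.
 */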
2366int __init shmem_init(void)
2367{
2368 int error;
2369
2370 error = bdi_init(&shmem_backing_dev_info);
2371 if (error)
2372 goto out4;
2373
2374 error = shmem_init_inodecache();
2375 if (error)
2376 goto out3;
2377
2378 error = register_filesystem(&shmem_fs_type);
2379 if (error) {
2380 printk(KERN_ERR "Could not register tmpfs\n");
2381 goto out2;
2382 }
2383
2384 shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2385 shmem_fs_type.name, NULL);
2386 if (IS_ERR(shm_mnt)) {
2387 error = PTR_ERR(shm_mnt);
2388 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2389 goto out1;
2390 }
2391 return 0;
2392
2393out1:
2394 unregister_filesystem(&shmem_fs_type);
2395out2:
2396 shmem_destroy_inodecache();
2397out3:
2398 bdi_destroy(&shmem_backing_dev_info);
2399out4:
2400 shm_mnt = ERR_PTR(error);
2401 return error;
2402}
2403
2404#else /* !CONFIG_SHMEM */
2405
2406/*
2407 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2408 *
2409 * This is intended for small systems where the benefits of the full
2410 * shmem code (swap-backed and resource-limited) are outweighed by
2411 * their complexity. On systems without swap this code should be
2412 * effectively equivalent, but much lighter weight.
2413 */
2414
2415#include <linux/ramfs.h>
2416
2417static struct file_system_type shmem_fs_type = {
2418 .name = "tmpfs",
2419 .mount = ramfs_mount,
2420 .kill_sb = kill_litter_super,
2421};
2422
2423int __init shmem_init(void)
2424{
2425 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2426
2427 shm_mnt = kern_mount(&shmem_fs_type);
2428 BUG_ON(IS_ERR(shm_mnt));
2429
2430 return 0;
2431}
2432
2433int shmem_unuse(swp_entry_t swap, struct page *page)
2434{
2435 return 0;
2436}
2437
2438int shmem_lock(struct file *file, int lock, struct user_struct *user)
2439{
2440 return 0;
2441}
2442
2443void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2444{
2445 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2446}
2447EXPORT_SYMBOL_GPL(shmem_truncate_range);
2448
2449#define shmem_vm_ops generic_file_vm_ops
2450#define shmem_file_operations ramfs_file_operations
2451#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
2452#define shmem_acct_size(flags, size) 0
2453#define shmem_unacct_size(flags, size) do {} while (0)
2454
2455#endif /* CONFIG_SHMEM */
2456
2457/* common code */
2458
2459/**
2460 * shmem_file_setup - get an unlinked file living in tmpfs
2461 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2462 * @size: size to be set for the file
2463 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2464 */
2465struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2466{
2467 int error;
2468 struct file *file;
2469 struct inode *inode;
2470 struct path path;
2471 struct dentry *root;
2472 struct qstr this;
2473
2474 if (IS_ERR(shm_mnt))
2475 return (void *)shm_mnt;
2476
2477 if (size < 0 || size > MAX_LFS_FILESIZE)
2478 return ERR_PTR(-EINVAL);
2479
2480 if (shmem_acct_size(flags, size))
2481 return ERR_PTR(-ENOMEM);
2482
2483 error = -ENOMEM;
2484 this.name = name;
2485 this.len = strlen(name);
2486 this.hash = 0; /* will go */
2487 root = shm_mnt->mnt_root;
2488 path.dentry = d_alloc(root, &this);
2489 if (!path.dentry)
2490 goto put_memory;
2491 path.mnt = mntget(shm_mnt);
2492
2493 error = -ENOSPC;
2494 inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2495 if (!inode)
2496 goto put_dentry;
2497
2498 d_instantiate(path.dentry, inode);
2499 inode->i_size = size;
2500 inode->i_nlink = 0; /* It is unlinked */
2501#ifndef CONFIG_MMU
2502 error = ramfs_nommu_expand_for_mapping(inode, size);
2503 if (error)
2504 goto put_dentry;
2505#endif
2506
2507 error = -ENFILE;
2508 file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2509 &shmem_file_operations);
2510 if (!file)
2511 goto put_dentry;
2512
2513 return file;
2514
2515put_dentry:
2516 path_put(&path);
2517put_memory:
2518 shmem_unacct_size(flags, size);
2519 return ERR_PTR(error);
2520}
2521EXPORT_SYMBOL_GPL(shmem_file_setup);
2522
2523/**
2524 * shmem_zero_setup - setup a shared anonymous mapping
2525 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2526 */
2527int shmem_zero_setup(struct vm_area_struct *vma)
2528{
2529 struct file *file;
2530 loff_t size = vma->vm_end - vma->vm_start;
2531
2532 file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2533 if (IS_ERR(file))
2534 return PTR_ERR(file);
2535
2536 if (vma->vm_file)
2537 fput(vma->vm_file);
2538 vma->vm_file = file;
2539 vma->vm_ops = &shmem_vm_ops;
2540 vma->vm_flags |= VM_CAN_NONLINEAR;
2541 return 0;
2542}
2543
2544/**
2545 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2546 * @mapping: the page's address_space
2547 * @index: the page index
2548 * @gfp: the page allocator flags to use if allocating
2549 *
2550 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2551 * with any new page allocations done using the specified allocation flags.
2552 * But read_cache_page_gfp() uses the ->readpage() method: which does not
2553 * suit tmpfs, since it may have pages in swapcache, and needs to find those
2554 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2555 *
2556 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2557 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2558 */
2559struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2560 pgoff_t index, gfp_t gfp)
2561{
2562#ifdef CONFIG_SHMEM
2563 struct inode *inode = mapping->host;
2564 struct page *page;
2565 int error;
2566
2567 BUG_ON(mapping->a_ops != &shmem_aops);
2568 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2569 if (error)
2570 page = ERR_PTR(error);
2571 else
2572 unlock_page(page);
2573 return page;
2574#else
2575 /*
2576 * The tiny !SHMEM case uses ramfs without swap
2577 */
2578 return read_cache_page_gfp(mapping, index, gfp);
2579#endif
2580}
2581EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);