   1/*
   2 * Resizable virtual memory filesystem for Linux.
   3 *
   4 * Copyright (C) 2000 Linus Torvalds.
   5 *		 2000 Transmeta Corp.
   6 *		 2000-2001 Christoph Rohland
   7 *		 2000-2001 SAP AG
   8 *		 2002 Red Hat Inc.
   9 * Copyright (C) 2002-2011 Hugh Dickins.
  10 * Copyright (C) 2011 Google Inc.
  11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
  12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
  13 *
  14 * Extended attribute support for tmpfs:
  15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
  16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
  17 *
  18 * tiny-shmem:
  19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
  20 *
  21 * This file is released under the GPL.
  22 */
  23
  24#include <linux/fs.h>
  25#include <linux/init.h>
  26#include <linux/vfs.h>
  27#include <linux/mount.h>
  28#include <linux/ramfs.h>
  29#include <linux/pagemap.h>
  30#include <linux/file.h>
  31#include <linux/mm.h>
  32#include <linux/random.h>
  33#include <linux/sched/signal.h>
  34#include <linux/export.h>
  35#include <linux/swap.h>
  36#include <linux/uio.h>
  37#include <linux/khugepaged.h>
  38#include <linux/hugetlb.h>
  39#include <linux/frontswap.h>
  40#include <linux/fs_parser.h>
  41
  42#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
  43
  44static struct vfsmount *shm_mnt;
  45
  46#ifdef CONFIG_SHMEM
  47/*
  48 * This virtual memory filesystem is heavily based on the ramfs. It
  49 * extends ramfs by the ability to use swap and honor resource limits
  50 * which makes it a completely usable filesystem.
  51 */
  52
  53#include <linux/xattr.h>
  54#include <linux/exportfs.h>
  55#include <linux/posix_acl.h>
  56#include <linux/posix_acl_xattr.h>
  57#include <linux/mman.h>
  58#include <linux/string.h>
  59#include <linux/slab.h>
  60#include <linux/backing-dev.h>
  61#include <linux/shmem_fs.h>
  62#include <linux/writeback.h>
  63#include <linux/blkdev.h>
  64#include <linux/pagevec.h>
  65#include <linux/percpu_counter.h>
  66#include <linux/falloc.h>
  67#include <linux/splice.h>
  68#include <linux/security.h>
  69#include <linux/swapops.h>
  70#include <linux/mempolicy.h>
  71#include <linux/namei.h>
  72#include <linux/ctype.h>
  73#include <linux/migrate.h>
  74#include <linux/highmem.h>
  75#include <linux/seq_file.h>
  76#include <linux/magic.h>
  77#include <linux/syscalls.h>
  78#include <linux/fcntl.h>
  79#include <uapi/linux/memfd.h>
  80#include <linux/userfaultfd_k.h>
  81#include <linux/rmap.h>
  82#include <linux/uuid.h>
  83
  84#include <linux/uaccess.h>
  85#include <asm/pgtable.h>
  86
  87#include "internal.h"
  88
  89#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
  90#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
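/* e.g. with 4 KiB pages, VM_ACCT(5000) == 2: PAGE_ALIGN rounds 5000 up to 8192 */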
  91
  92/* Pretend that each entry is of this size in directory's i_size */
  93#define BOGO_DIRENT_SIZE 20
  94
  95/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
  96#define SHORT_SYMLINK_LEN 128
  97
  98/*
  99 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 100 * inode->i_private (with i_mutex making sure that it has only one user at
 101 * a time): we would prefer not to enlarge the shmem inode just for that.
 102 */
 103struct shmem_falloc {
 104	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 105	pgoff_t start;		/* start of range currently being fallocated */
 106	pgoff_t next;		/* the next page offset to be fallocated */
 107	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
 108	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
 109};
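/*
 * Illustrative sketch (editor's example, not part of shmem.c): the
 * bookkeeping above is driven by a userspace hole punch on a tmpfs
 * file.  Path and sizes are arbitrary; tmpfs is commonly at /dev/shm.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int punch_hole_example(void)
{
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return -1;
	/* make a 1 MiB file, then punch 256 KiB at offset 128 KiB */
	if (ftruncate(fd, 1 << 20) ||
	    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      128 << 10, 256 << 10)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}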
 110
 111struct shmem_options {
 112	unsigned long long blocks;
 113	unsigned long long inodes;
 114	struct mempolicy *mpol;
 115	kuid_t uid;
 116	kgid_t gid;
 117	umode_t mode;
 118	int huge;
 119	int seen;
 120#define SHMEM_SEEN_BLOCKS 1
 121#define SHMEM_SEEN_INODES 2
 122#define SHMEM_SEEN_HUGE 4
 123};
 124
 125#ifdef CONFIG_TMPFS
 126static unsigned long shmem_default_max_blocks(void)
 127{
 128	return totalram_pages() / 2;
 129}
 130
 131static unsigned long shmem_default_max_inodes(void)
 132{
 133	unsigned long nr_pages = totalram_pages();
 134
 135	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
 136}
 137#endif
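/*
 * Illustrative sketch (editor's example, not part of shmem.c): mounting
 * tmpfs from C.  Leaving out "size="/"nr_inodes=" picks the defaults
 * computed above (half of RAM).  Mount point and options are arbitrary;
 * mount(2) needs CAP_SYS_ADMIN, and "huge=" needs
 * CONFIG_TRANSPARENT_HUGE_PAGECACHE.
 */
#include <sys/mount.h>

static int mount_tmpfs_example(void)
{
	return mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
		     "size=1g,nr_inodes=10240,mode=1777,huge=within_size");
}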
 138
 139static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
 140static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 141				struct shmem_inode_info *info, pgoff_t index);
 142static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 143			     struct page **pagep, enum sgp_type sgp,
 144			     gfp_t gfp, struct vm_area_struct *vma,
 145			     vm_fault_t *fault_type);
 146static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 147		struct page **pagep, enum sgp_type sgp,
 148		gfp_t gfp, struct vm_area_struct *vma,
 149		struct vm_fault *vmf, vm_fault_t *fault_type);
 150
 151int shmem_getpage(struct inode *inode, pgoff_t index,
 152		struct page **pagep, enum sgp_type sgp)
 153{
 154	return shmem_getpage_gfp(inode, index, pagep, sgp,
 155		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
 156}
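/*
 * Illustrative sketch (editor's example): how an in-kernel caller such
 * as the read path typically uses shmem_getpage().  The helper name is
 * hypothetical.
 */
static int shmem_getpage_usage_example(struct inode *inode, pgoff_t index)
{
	struct page *page = NULL;
	int error = shmem_getpage(inode, index, &page, SGP_READ);

	if (error)
		return error;
	if (page) {		/* SGP_READ returns page == NULL over a hole */
		/* the page comes back locked, with a reference held */
		unlock_page(page);
		/* ... use the page contents ... */
		put_page(page);
	}
	return 0;
}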
 157
 158static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 159{
 160	return sb->s_fs_info;
 161}
 162
 163/*
 164 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 165 * for shared memory and for shared anonymous (/dev/zero) mappings
 166 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 167 * consistent with the pre-accounting of private mappings ...
 168 */
 169static inline int shmem_acct_size(unsigned long flags, loff_t size)
 170{
 171	return (flags & VM_NORESERVE) ?
 172		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
 173}
 174
 175static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 176{
 177	if (!(flags & VM_NORESERVE))
 178		vm_unacct_memory(VM_ACCT(size));
 179}
 180
 181static inline int shmem_reacct_size(unsigned long flags,
 182		loff_t oldsize, loff_t newsize)
 183{
 184	if (!(flags & VM_NORESERVE)) {
 185		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
 186			return security_vm_enough_memory_mm(current->mm,
 187					VM_ACCT(newsize) - VM_ACCT(oldsize));
 188		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
 189			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
 190	}
 191	return 0;
 192}
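/*
 * Illustrative sketch (editor's example): the VM_NORESERVE distinction
 * in the helpers above is what userspace selects with MAP_NORESERVE.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

static void *map_shared_example(size_t len, int noreserve)
{
	int flags = MAP_SHARED | MAP_ANONYMOUS |
		    (noreserve ? MAP_NORESERVE : 0);

	/* without MAP_NORESERVE, the whole object is charged up front */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0);
}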
 193
 194/*
 195 * ... whereas tmpfs objects are accounted incrementally as
 196 * pages are allocated, in order to allow large sparse files.
 197 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 198 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 199 */
 200static inline int shmem_acct_block(unsigned long flags, long pages)
 201{
 202	if (!(flags & VM_NORESERVE))
 203		return 0;
 204
 205	return security_vm_enough_memory_mm(current->mm,
 206			pages * VM_ACCT(PAGE_SIZE));
 207}
 208
 209static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 210{
 211	if (flags & VM_NORESERVE)
 212		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 213}
 214
 215static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
 216{
 217	struct shmem_inode_info *info = SHMEM_I(inode);
 218	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 219
 220	if (shmem_acct_block(info->flags, pages))
 221		return false;
 222
 223	if (sbinfo->max_blocks) {
 224		if (percpu_counter_compare(&sbinfo->used_blocks,
 225					   sbinfo->max_blocks - pages) > 0)
 226			goto unacct;
 227		percpu_counter_add(&sbinfo->used_blocks, pages);
 228	}
 229
 230	return true;
 231
 232unacct:
 233	shmem_unacct_blocks(info->flags, pages);
 234	return false;
 235}
 236
 237static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
 238{
 239	struct shmem_inode_info *info = SHMEM_I(inode);
 240	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 241
 242	if (sbinfo->max_blocks)
 243		percpu_counter_sub(&sbinfo->used_blocks, pages);
 244	shmem_unacct_blocks(info->flags, pages);
 245}
 246
 247static const struct super_operations shmem_ops;
 248static const struct address_space_operations shmem_aops;
 249static const struct file_operations shmem_file_operations;
 250static const struct inode_operations shmem_inode_operations;
 251static const struct inode_operations shmem_dir_inode_operations;
 252static const struct inode_operations shmem_special_inode_operations;
 253static const struct vm_operations_struct shmem_vm_ops;
 254static struct file_system_type shmem_fs_type;
 255
 256bool vma_is_shmem(struct vm_area_struct *vma)
 257{
 258	return vma->vm_ops == &shmem_vm_ops;
 259}
 260
 261static LIST_HEAD(shmem_swaplist);
 262static DEFINE_MUTEX(shmem_swaplist_mutex);
 263
 264static int shmem_reserve_inode(struct super_block *sb)
 265{
 266	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 267	if (sbinfo->max_inodes) {
 268		spin_lock(&sbinfo->stat_lock);
 269		if (!sbinfo->free_inodes) {
 270			spin_unlock(&sbinfo->stat_lock);
 271			return -ENOSPC;
 272		}
 273		sbinfo->free_inodes--;
 274		spin_unlock(&sbinfo->stat_lock);
 275	}
 276	return 0;
 277}
 278
 279static void shmem_free_inode(struct super_block *sb)
 280{
 281	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 282	if (sbinfo->max_inodes) {
 283		spin_lock(&sbinfo->stat_lock);
 284		sbinfo->free_inodes++;
 285		spin_unlock(&sbinfo->stat_lock);
 286	}
 287}
 288
 289/**
 290 * shmem_recalc_inode - recalculate the block usage of an inode
 291 * @inode: inode to recalc
 292 *
 293 * We have to calculate the free blocks since the mm can drop
 294 * undirtied hole pages behind our back.
 295 *
 296 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 297 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 298 *
 299 * It has to be called with the spinlock held.
 300 */
 301static void shmem_recalc_inode(struct inode *inode)
 302{
 303	struct shmem_inode_info *info = SHMEM_I(inode);
 304	long freed;
 305
 306	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
 307	if (freed > 0) {
 308		info->alloced -= freed;
 309		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
 310		shmem_inode_unacct_blocks(inode, freed);
 311	}
 312}
 313
 314bool shmem_charge(struct inode *inode, long pages)
 315{
 316	struct shmem_inode_info *info = SHMEM_I(inode);
 317	unsigned long flags;
 318
 319	if (!shmem_inode_acct_block(inode, pages))
 320		return false;
 321
 322	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
 323	inode->i_mapping->nrpages += pages;
 324
 325	spin_lock_irqsave(&info->lock, flags);
 326	info->alloced += pages;
 327	inode->i_blocks += pages * BLOCKS_PER_PAGE;
 328	shmem_recalc_inode(inode);
 329	spin_unlock_irqrestore(&info->lock, flags);
 330
 331	return true;
 332}
 333
 334void shmem_uncharge(struct inode *inode, long pages)
 335{
 336	struct shmem_inode_info *info = SHMEM_I(inode);
 337	unsigned long flags;
 338
 339	/* nrpages adjustment done by __delete_from_page_cache() or caller */
 340
 341	spin_lock_irqsave(&info->lock, flags);
 342	info->alloced -= pages;
 343	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
 344	shmem_recalc_inode(inode);
 345	spin_unlock_irqrestore(&info->lock, flags);
 346
 347	shmem_inode_unacct_blocks(inode, pages);
 348}
 349
 350/*
 351 * Replace item expected in xarray by a new item, while holding xa_lock.
 352 */
 353static int shmem_replace_entry(struct address_space *mapping,
 354			pgoff_t index, void *expected, void *replacement)
 355{
 356	XA_STATE(xas, &mapping->i_pages, index);
 357	void *item;
 358
 359	VM_BUG_ON(!expected);
 360	VM_BUG_ON(!replacement);
 361	item = xas_load(&xas);
 362	if (item != expected)
 363		return -ENOENT;
 364	xas_store(&xas, replacement);
 365	return 0;
 366}
 367
 368/*
 369 * Sometimes, before we decide whether to proceed or to fail, we must check
 370 * that an entry was not already brought back from swap by a racing thread.
 371 *
 372 * Checking page is not enough: by the time a SwapCache page is locked, it
 373 * might be reused, and again be SwapCache, using the same swap as before.
 374 */
 375static bool shmem_confirm_swap(struct address_space *mapping,
 376			       pgoff_t index, swp_entry_t swap)
 377{
 378	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
 379}
 380
 381/*
 382 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 383 *
 384 * SHMEM_HUGE_NEVER:
 385 *	disables huge pages for the mount;
 386 * SHMEM_HUGE_ALWAYS:
 387 *	enables huge pages for the mount;
 388 * SHMEM_HUGE_WITHIN_SIZE:
 389 *	only allocate huge pages if the page will be fully within i_size,
 390 *	also respect fadvise()/madvise() hints;
 391 * SHMEM_HUGE_ADVISE:
 392 *	only allocate huge pages if requested with fadvise()/madvise();
 393 */
 394
 395#define SHMEM_HUGE_NEVER	0
 396#define SHMEM_HUGE_ALWAYS	1
 397#define SHMEM_HUGE_WITHIN_SIZE	2
 398#define SHMEM_HUGE_ADVISE	3
 399
 400/*
 401 * Special values.
 402 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 403 *
 404 * SHMEM_HUGE_DENY:
 405 *	disables huge on shm_mnt and all mounts, for emergency use;
 406 * SHMEM_HUGE_FORCE:
 407 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 408 *
 409 */
 410#define SHMEM_HUGE_DENY		(-1)
 411#define SHMEM_HUGE_FORCE	(-2)
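/*
 * Illustrative sketch (editor's example): selecting the values above
 * from userspace, e.g. set_shmem_enabled_example("deny").
 */
#include <stdio.h>

static int set_shmem_enabled_example(const char *mode)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
			"w");

	if (!f)
		return -1;
	fputs(mode, f);		/* "never", "always", ..., "deny", "force" */
	return fclose(f);
}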
 412
 413#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 414/* ifdef here to avoid bloating shmem.o when not necessary */
 415
 416static int shmem_huge __read_mostly;
 417
 418#if defined(CONFIG_SYSFS)
 419static int shmem_parse_huge(const char *str)
 420{
 421	if (!strcmp(str, "never"))
 422		return SHMEM_HUGE_NEVER;
 423	if (!strcmp(str, "always"))
 424		return SHMEM_HUGE_ALWAYS;
 425	if (!strcmp(str, "within_size"))
 426		return SHMEM_HUGE_WITHIN_SIZE;
 427	if (!strcmp(str, "advise"))
 428		return SHMEM_HUGE_ADVISE;
 429	if (!strcmp(str, "deny"))
 430		return SHMEM_HUGE_DENY;
 431	if (!strcmp(str, "force"))
 432		return SHMEM_HUGE_FORCE;
 433	return -EINVAL;
 434}
 435#endif
 436
 437#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
 438static const char *shmem_format_huge(int huge)
 439{
 440	switch (huge) {
 441	case SHMEM_HUGE_NEVER:
 442		return "never";
 443	case SHMEM_HUGE_ALWAYS:
 444		return "always";
 445	case SHMEM_HUGE_WITHIN_SIZE:
 446		return "within_size";
 447	case SHMEM_HUGE_ADVISE:
 448		return "advise";
 449	case SHMEM_HUGE_DENY:
 450		return "deny";
 451	case SHMEM_HUGE_FORCE:
 452		return "force";
 453	default:
 454		VM_BUG_ON(1);
 455		return "bad_val";
 456	}
 457}
 458#endif
 459
 460static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 461		struct shrink_control *sc, unsigned long nr_to_split)
 462{
 463	LIST_HEAD(list), *pos, *next;
 464	LIST_HEAD(to_remove);
 465	struct inode *inode;
 466	struct shmem_inode_info *info;
 467	struct page *page;
 468	unsigned long batch = sc ? sc->nr_to_scan : 128;
 469	int removed = 0, split = 0;
 470
 471	if (list_empty(&sbinfo->shrinklist))
 472		return SHRINK_STOP;
 473
 474	spin_lock(&sbinfo->shrinklist_lock);
 475	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
 476		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 477
 478		/* pin the inode */
 479		inode = igrab(&info->vfs_inode);
 480
 481		/* inode is about to be evicted */
 482		if (!inode) {
 483			list_del_init(&info->shrinklist);
 484			removed++;
 485			goto next;
 486		}
 487
 488		/* Check if there's anything to gain */
 489		if (round_up(inode->i_size, PAGE_SIZE) ==
 490				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
 491			list_move(&info->shrinklist, &to_remove);
 492			removed++;
 493			goto next;
 494		}
 495
 496		list_move(&info->shrinklist, &list);
 497next:
 498		if (!--batch)
 499			break;
 500	}
 501	spin_unlock(&sbinfo->shrinklist_lock);
 502
 503	list_for_each_safe(pos, next, &to_remove) {
 504		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 505		inode = &info->vfs_inode;
 506		list_del_init(&info->shrinklist);
 507		iput(inode);
 508	}
 509
 510	list_for_each_safe(pos, next, &list) {
 511		int ret;
 512
 513		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 514		inode = &info->vfs_inode;
 515
 516		if (nr_to_split && split >= nr_to_split)
 517			goto leave;
 518
 519		page = find_get_page(inode->i_mapping,
 520				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
 521		if (!page)
 522			goto drop;
 523
 524		/* No huge page at the end of the file: nothing to split */
 525		if (!PageTransHuge(page)) {
 526			put_page(page);
 527			goto drop;
 528		}
 529
 530		/*
 531		 * Leave the inode on the list if we failed to lock
 532		 * the page at this time.
 533		 *
 534		 * Waiting for the lock may lead to deadlock in the
 535		 * reclaim path.
 536		 */
 537		if (!trylock_page(page)) {
 538			put_page(page);
 539			goto leave;
 540		}
 541
 542		ret = split_huge_page(page);
 543		unlock_page(page);
 544		put_page(page);
 545
 546		/* If split failed leave the inode on the list */
 547		if (ret)
 548			goto leave;
 549
 550		split++;
 551drop:
 552		list_del_init(&info->shrinklist);
 553		removed++;
 554leave:
 555		iput(inode);
 556	}
 557
 558	spin_lock(&sbinfo->shrinklist_lock);
 559	list_splice_tail(&list, &sbinfo->shrinklist);
 560	sbinfo->shrinklist_len -= removed;
 561	spin_unlock(&sbinfo->shrinklist_lock);
 562
 563	return split;
 564}
 565
 566static long shmem_unused_huge_scan(struct super_block *sb,
 567		struct shrink_control *sc)
 568{
 569	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 570
 571	if (!READ_ONCE(sbinfo->shrinklist_len))
 572		return SHRINK_STOP;
 573
 574	return shmem_unused_huge_shrink(sbinfo, sc, 0);
 575}
 576
 577static long shmem_unused_huge_count(struct super_block *sb,
 578		struct shrink_control *sc)
 579{
 580	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 581	return READ_ONCE(sbinfo->shrinklist_len);
 582}
 583#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 584
 585#define shmem_huge SHMEM_HUGE_DENY
 586
 587static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 588		struct shrink_control *sc, unsigned long nr_to_split)
 589{
 590	return 0;
 591}
 592#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 593
 594static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
 595{
 596	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
 597	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
 598	    shmem_huge != SHMEM_HUGE_DENY)
 599		return true;
 600	return false;
 601}
 602
 603/*
 604 * Like add_to_page_cache_locked, but error if expected item has gone.
 605 */
 606static int shmem_add_to_page_cache(struct page *page,
 607				   struct address_space *mapping,
 608				   pgoff_t index, void *expected, gfp_t gfp)
 609{
 610	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 611	unsigned long i = 0;
 612	unsigned long nr = compound_nr(page);
 613
 614	VM_BUG_ON_PAGE(PageTail(page), page);
 615	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
 616	VM_BUG_ON_PAGE(!PageLocked(page), page);
 617	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 618	VM_BUG_ON(expected && PageTransHuge(page));
 619
 620	page_ref_add(page, nr);
 621	page->mapping = mapping;
 622	page->index = index;
 623
 624	do {
 625		void *entry;
 626		xas_lock_irq(&xas);
 627		entry = xas_find_conflict(&xas);
 628		if (entry != expected)
 629			xas_set_err(&xas, -EEXIST);
 630		xas_create_range(&xas);
 631		if (xas_error(&xas))
 632			goto unlock;
 633next:
 634		xas_store(&xas, page);
 635		if (++i < nr) {
 636			xas_next(&xas);
 637			goto next;
 638		}
 639		if (PageTransHuge(page)) {
 640			count_vm_event(THP_FILE_ALLOC);
 641			__inc_node_page_state(page, NR_SHMEM_THPS);
 642		}
 643		mapping->nrpages += nr;
 644		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 645		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
 646unlock:
 647		xas_unlock_irq(&xas);
 648	} while (xas_nomem(&xas, gfp));
 649
 650	if (xas_error(&xas)) {
 651		page->mapping = NULL;
 652		page_ref_sub(page, nr);
 653		return xas_error(&xas);
 654	}
 655
 656	return 0;
 657}
 658
 659/*
 660 * Like delete_from_page_cache, but substitutes swap for page.
 661 */
 662static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 663{
 664	struct address_space *mapping = page->mapping;
 665	int error;
 666
 667	VM_BUG_ON_PAGE(PageCompound(page), page);
 668
 669	xa_lock_irq(&mapping->i_pages);
 670	error = shmem_replace_entry(mapping, page->index, page, radswap);
 671	page->mapping = NULL;
 672	mapping->nrpages--;
 673	__dec_node_page_state(page, NR_FILE_PAGES);
 674	__dec_node_page_state(page, NR_SHMEM);
 675	xa_unlock_irq(&mapping->i_pages);
 676	put_page(page);
 677	BUG_ON(error);
 678}
 679
 680/*
 681 * Remove swap entry from page cache, free the swap and its page cache.
 682 */
 683static int shmem_free_swap(struct address_space *mapping,
 684			   pgoff_t index, void *radswap)
 685{
 686	void *old;
 687
 688	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
 689	if (old != radswap)
 690		return -ENOENT;
 691	free_swap_and_cache(radix_to_swp_entry(radswap));
 692	return 0;
 693}
 694
 695/*
 696 * Determine (in bytes) how many of the shmem object's pages mapped by the
 697 * given offsets are swapped out.
 698 *
 699 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 700 * as long as the inode doesn't go away and racy results are not a problem.
 701 */
 702unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 703						pgoff_t start, pgoff_t end)
 704{
 705	XA_STATE(xas, &mapping->i_pages, start);
 706	struct page *page;
 707	unsigned long swapped = 0;
 708
 709	rcu_read_lock();
 710	xas_for_each(&xas, page, end - 1) {
 711		if (xas_retry(&xas, page))
 712			continue;
 713		if (xa_is_value(page))
 714			swapped++;
 715
 716		if (need_resched()) {
 717			xas_pause(&xas);
 718			cond_resched_rcu();
 719		}
 720	}
 721
 722	rcu_read_unlock();
 723
 724	return swapped << PAGE_SHIFT;
 725}
 726
 727/*
 728 * Determine (in bytes) how many of the shmem object's pages mapped by the
 729 * given vma are swapped out.
 730 *
 731 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 732 * as long as the inode doesn't go away and racy results are not a problem.
 733 */
 734unsigned long shmem_swap_usage(struct vm_area_struct *vma)
 735{
 736	struct inode *inode = file_inode(vma->vm_file);
 737	struct shmem_inode_info *info = SHMEM_I(inode);
 738	struct address_space *mapping = inode->i_mapping;
 739	unsigned long swapped;
 740
 741	/* Be careful as we don't hold info->lock */
 742	swapped = READ_ONCE(info->swapped);
 743
 744	/*
 745	 * The easier cases are when the shmem object has nothing in swap, or
 746	 * the vma maps it whole. Then we can simply use the stats that we
 747	 * already track.
 748	 */
 749	if (!swapped)
 750		return 0;
 751
 752	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
 753		return swapped << PAGE_SHIFT;
 754
 755	/* Here comes the more involved part */
 756	return shmem_partial_swap_usage(mapping,
 757			linear_page_index(vma, vma->vm_start),
 758			linear_page_index(vma, vma->vm_end));
 759}
 760
 761/*
 762 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 763 */
 764void shmem_unlock_mapping(struct address_space *mapping)
 765{
 766	struct pagevec pvec;
 767	pgoff_t indices[PAGEVEC_SIZE];
 768	pgoff_t index = 0;
 769
 770	pagevec_init(&pvec);
 771	/*
 772	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
 773	 */
 774	while (!mapping_unevictable(mapping)) {
 775		/*
 776		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
 777		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
 778		 */
 779		pvec.nr = find_get_entries(mapping, index,
 780					   PAGEVEC_SIZE, pvec.pages, indices);
 781		if (!pvec.nr)
 782			break;
 783		index = indices[pvec.nr - 1] + 1;
 784		pagevec_remove_exceptionals(&pvec);
 785		check_move_unevictable_pages(&pvec);
 786		pagevec_release(&pvec);
 787		cond_resched();
 788	}
 789}
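/*
 * Illustrative sketch (editor's example): the SysV IPC calls that lead
 * here.  SHM_LOCK marks the segment's pages unevictable; SHM_UNLOCK
 * reaches shmem_unlock_mapping() to move them back.
 */
#include <sys/shm.h>

static int shm_lock_cycle_example(int shmid)
{
	if (shmctl(shmid, SHM_LOCK, NULL))
		return -1;
	return shmctl(shmid, SHM_UNLOCK, NULL);
}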
 790
 791/*
 792 * Remove range of pages and swap entries from page cache, and free them.
 793 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 794 */
 795static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 796								 bool unfalloc)
 797{
 798	struct address_space *mapping = inode->i_mapping;
 799	struct shmem_inode_info *info = SHMEM_I(inode);
 800	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 801	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
 802	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
 803	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
 804	struct pagevec pvec;
 805	pgoff_t indices[PAGEVEC_SIZE];
 806	long nr_swaps_freed = 0;
 807	pgoff_t index;
 808	int i;
 809
 810	if (lend == -1)
 811		end = -1;	/* unsigned, so actually very big */
 812
 813	pagevec_init(&pvec);
 814	index = start;
 815	while (index < end) {
 816		pvec.nr = find_get_entries(mapping, index,
 817			min(end - index, (pgoff_t)PAGEVEC_SIZE),
 818			pvec.pages, indices);
 819		if (!pvec.nr)
 820			break;
 821		for (i = 0; i < pagevec_count(&pvec); i++) {
 822			struct page *page = pvec.pages[i];
 823
 824			index = indices[i];
 825			if (index >= end)
 826				break;
 827
 828			if (xa_is_value(page)) {
 829				if (unfalloc)
 830					continue;
 831				nr_swaps_freed += !shmem_free_swap(mapping,
 832								index, page);
 833				continue;
 834			}
 835
 836			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
 837
 838			if (!trylock_page(page))
 839				continue;
 840
 841			if (PageTransTail(page)) {
 842				/* Middle of THP: zero out the page */
 843				clear_highpage(page);
 844				unlock_page(page);
 845				continue;
 846			} else if (PageTransHuge(page)) {
 847				if (index == round_down(end, HPAGE_PMD_NR)) {
 848					/*
 849					 * Range ends in the middle of THP:
 850					 * zero out the page
 851					 */
 852					clear_highpage(page);
 853					unlock_page(page);
 854					continue;
 855				}
 856				index += HPAGE_PMD_NR - 1;
 857				i += HPAGE_PMD_NR - 1;
 858			}
 859
 860			if (!unfalloc || !PageUptodate(page)) {
 861				VM_BUG_ON_PAGE(PageTail(page), page);
 862				if (page_mapping(page) == mapping) {
 863					VM_BUG_ON_PAGE(PageWriteback(page), page);
 864					truncate_inode_page(mapping, page);
 865				}
 866			}
 867			unlock_page(page);
 868		}
 869		pagevec_remove_exceptionals(&pvec);
 870		pagevec_release(&pvec);
 871		cond_resched();
 872		index++;
 873	}
 874
 875	if (partial_start) {
 876		struct page *page = NULL;
 877		shmem_getpage(inode, start - 1, &page, SGP_READ);
 878		if (page) {
 879			unsigned int top = PAGE_SIZE;
 880			if (start > end) {
 881				top = partial_end;
 882				partial_end = 0;
 883			}
 884			zero_user_segment(page, partial_start, top);
 885			set_page_dirty(page);
 886			unlock_page(page);
 887			put_page(page);
 888		}
 889	}
 890	if (partial_end) {
 891		struct page *page = NULL;
 892		shmem_getpage(inode, end, &page, SGP_READ);
 893		if (page) {
 894			zero_user_segment(page, 0, partial_end);
 895			set_page_dirty(page);
 896			unlock_page(page);
 897			put_page(page);
 898		}
 899	}
 900	if (start >= end)
 901		return;
 902
 903	index = start;
 904	while (index < end) {
 905		cond_resched();
 906
 907		pvec.nr = find_get_entries(mapping, index,
 908				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 909				pvec.pages, indices);
 910		if (!pvec.nr) {
 911			/* If all gone or hole-punch or unfalloc, we're done */
 912			if (index == start || end != -1)
 913				break;
 914			/* But if truncating, restart to make sure all gone */
 915			index = start;
 916			continue;
 917		}
 918		for (i = 0; i < pagevec_count(&pvec); i++) {
 919			struct page *page = pvec.pages[i];
 920
 921			index = indices[i];
 922			if (index >= end)
 923				break;
 924
 925			if (xa_is_value(page)) {
 926				if (unfalloc)
 927					continue;
 928				if (shmem_free_swap(mapping, index, page)) {
 929					/* Swap was replaced by page: retry */
 930					index--;
 931					break;
 932				}
 933				nr_swaps_freed++;
 934				continue;
 935			}
 936
 937			lock_page(page);
 938
 939			if (PageTransTail(page)) {
 940				/* Middle of THP: zero out the page */
 941				clear_highpage(page);
 942				unlock_page(page);
 943				/*
944				 * Partial THP truncate due to 'start' in the
945				 * middle of a THP: no need to look at these
946				 * pages again on the !pvec.nr restart.
 947				 */
 948				if (index != round_down(end, HPAGE_PMD_NR))
 949					start++;
 950				continue;
 951			} else if (PageTransHuge(page)) {
 952				if (index == round_down(end, HPAGE_PMD_NR)) {
 953					/*
 954					 * Range ends in the middle of THP:
 955					 * zero out the page
 956					 */
 957					clear_highpage(page);
 958					unlock_page(page);
 959					continue;
 960				}
 961				index += HPAGE_PMD_NR - 1;
 962				i += HPAGE_PMD_NR - 1;
 963			}
 964
 965			if (!unfalloc || !PageUptodate(page)) {
 966				VM_BUG_ON_PAGE(PageTail(page), page);
 967				if (page_mapping(page) == mapping) {
 968					VM_BUG_ON_PAGE(PageWriteback(page), page);
 969					truncate_inode_page(mapping, page);
 970				} else {
 971					/* Page was replaced by swap: retry */
 972					unlock_page(page);
 973					index--;
 974					break;
 975				}
 976			}
 977			unlock_page(page);
 978		}
 979		pagevec_remove_exceptionals(&pvec);
 980		pagevec_release(&pvec);
 981		index++;
 982	}
 983
 984	spin_lock_irq(&info->lock);
 985	info->swapped -= nr_swaps_freed;
 986	shmem_recalc_inode(inode);
 987	spin_unlock_irq(&info->lock);
 988}
 989
 990void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 991{
 992	shmem_undo_range(inode, lstart, lend, false);
 993	inode->i_ctime = inode->i_mtime = current_time(inode);
 994}
 995EXPORT_SYMBOL_GPL(shmem_truncate_range);
 996
 997static int shmem_getattr(const struct path *path, struct kstat *stat,
 998			 u32 request_mask, unsigned int query_flags)
 999{
1000	struct inode *inode = path->dentry->d_inode;
1001	struct shmem_inode_info *info = SHMEM_I(inode);
1002	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1003
1004	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1005		spin_lock_irq(&info->lock);
1006		shmem_recalc_inode(inode);
1007		spin_unlock_irq(&info->lock);
1008	}
1009	generic_fillattr(inode, stat);
1010
1011	if (is_huge_enabled(sb_info))
1012		stat->blksize = HPAGE_PMD_SIZE;
1013
1014	return 0;
1015}
1016
1017static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1018{
1019	struct inode *inode = d_inode(dentry);
1020	struct shmem_inode_info *info = SHMEM_I(inode);
1021	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1022	int error;
1023
1024	error = setattr_prepare(dentry, attr);
1025	if (error)
1026		return error;
1027
1028	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1029		loff_t oldsize = inode->i_size;
1030		loff_t newsize = attr->ia_size;
1031
1032		/* protected by i_mutex */
1033		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1034		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1035			return -EPERM;
1036
1037		if (newsize != oldsize) {
1038			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1039					oldsize, newsize);
1040			if (error)
1041				return error;
1042			i_size_write(inode, newsize);
1043			inode->i_ctime = inode->i_mtime = current_time(inode);
1044		}
1045		if (newsize <= oldsize) {
1046			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1047			if (oldsize > holebegin)
1048				unmap_mapping_range(inode->i_mapping,
1049							holebegin, 0, 1);
1050			if (info->alloced)
1051				shmem_truncate_range(inode,
1052							newsize, (loff_t)-1);
1053			/* unmap again to remove racily COWed private pages */
1054			if (oldsize > holebegin)
1055				unmap_mapping_range(inode->i_mapping,
1056							holebegin, 0, 1);
1057
1058			/*
1059			 * Part of the huge page can be beyond i_size: subject
1060			 * to shrink under memory pressure.
1061			 */
1062			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1063				spin_lock(&sbinfo->shrinklist_lock);
1064				/*
1065				 * _careful to defend against unlocked access to
1066				 * ->shrink_list in shmem_unused_huge_shrink()
1067				 */
1068				if (list_empty_careful(&info->shrinklist)) {
1069					list_add_tail(&info->shrinklist,
1070							&sbinfo->shrinklist);
1071					sbinfo->shrinklist_len++;
1072				}
1073				spin_unlock(&sbinfo->shrinklist_lock);
1074			}
1075		}
1076	}
1077
1078	setattr_copy(inode, attr);
1079	if (attr->ia_valid & ATTR_MODE)
1080		error = posix_acl_chmod(inode, inode->i_mode);
1081	return error;
1082}
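/*
 * Illustrative sketch (editor's example): the F_SEAL_SHRINK/F_SEAL_GROW
 * seals tested above are armed from userspace on a memfd.  The name
 * "example" is arbitrary.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int sealed_memfd_example(void)
{
	int fd = memfd_create("example", MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, 4096) ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW)) {
		close(fd);
		return -1;
	}
	/* from here on, shmem_setattr() refuses any size change: -EPERM */
	return fd;
}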
1083
1084static void shmem_evict_inode(struct inode *inode)
1085{
1086	struct shmem_inode_info *info = SHMEM_I(inode);
1087	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1088
1089	if (inode->i_mapping->a_ops == &shmem_aops) {
1090		shmem_unacct_size(info->flags, inode->i_size);
1091		inode->i_size = 0;
1092		shmem_truncate_range(inode, 0, (loff_t)-1);
1093		if (!list_empty(&info->shrinklist)) {
1094			spin_lock(&sbinfo->shrinklist_lock);
1095			if (!list_empty(&info->shrinklist)) {
1096				list_del_init(&info->shrinklist);
1097				sbinfo->shrinklist_len--;
1098			}
1099			spin_unlock(&sbinfo->shrinklist_lock);
1100		}
1101		while (!list_empty(&info->swaplist)) {
1102			/* Wait while shmem_unuse() is scanning this inode... */
1103			wait_var_event(&info->stop_eviction,
1104				       !atomic_read(&info->stop_eviction));
1105			mutex_lock(&shmem_swaplist_mutex);
1106			/* ...but beware of the race if we peeked too early */
1107			if (!atomic_read(&info->stop_eviction))
1108				list_del_init(&info->swaplist);
1109			mutex_unlock(&shmem_swaplist_mutex);
1110		}
1111	}
1112
1113	simple_xattrs_free(&info->xattrs);
1114	WARN_ON(inode->i_blocks);
1115	shmem_free_inode(inode->i_sb);
1116	clear_inode(inode);
1117}
1118
1119extern struct swap_info_struct *swap_info[];
1120
1121static int shmem_find_swap_entries(struct address_space *mapping,
1122				   pgoff_t start, unsigned int nr_entries,
1123				   struct page **entries, pgoff_t *indices,
1124				   unsigned int type, bool frontswap)
1125{
1126	XA_STATE(xas, &mapping->i_pages, start);
1127	struct page *page;
1128	swp_entry_t entry;
1129	unsigned int ret = 0;
1130
1131	if (!nr_entries)
1132		return 0;
1133
1134	rcu_read_lock();
1135	xas_for_each(&xas, page, ULONG_MAX) {
1136		if (xas_retry(&xas, page))
1137			continue;
1138
1139		if (!xa_is_value(page))
1140			continue;
1141
1142		entry = radix_to_swp_entry(page);
1143		if (swp_type(entry) != type)
1144			continue;
1145		if (frontswap &&
1146		    !frontswap_test(swap_info[type], swp_offset(entry)))
1147			continue;
1148
1149		indices[ret] = xas.xa_index;
1150		entries[ret] = page;
1151
1152		if (need_resched()) {
1153			xas_pause(&xas);
1154			cond_resched_rcu();
1155		}
1156		if (++ret == nr_entries)
1157			break;
1158	}
1159	rcu_read_unlock();
1160
1161	return ret;
1162}
1163
1164/*
1165 * Move the swapped pages for an inode to page cache. Returns the count
1166 * of pages swapped in, or the error in case of failure.
1167 */
1168static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1169				    pgoff_t *indices)
1170{
1171	int i = 0;
1172	int ret = 0;
1173	int error = 0;
1174	struct address_space *mapping = inode->i_mapping;
1175
1176	for (i = 0; i < pvec.nr; i++) {
1177		struct page *page = pvec.pages[i];
1178
1179		if (!xa_is_value(page))
1180			continue;
1181		error = shmem_swapin_page(inode, indices[i],
1182					  &page, SGP_CACHE,
1183					  mapping_gfp_mask(mapping),
1184					  NULL, NULL);
1185		if (error == 0) {
1186			unlock_page(page);
1187			put_page(page);
1188			ret++;
1189		}
1190		if (error == -ENOMEM)
1191			break;
1192		error = 0;
1193	}
1194	return error ? error : ret;
1195}
1196
1197/*
1198 * If swap found in inode, free it and move page from swapcache to filecache.
1199 */
1200static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1201			     bool frontswap, unsigned long *fs_pages_to_unuse)
1202{
1203	struct address_space *mapping = inode->i_mapping;
1204	pgoff_t start = 0;
1205	struct pagevec pvec;
1206	pgoff_t indices[PAGEVEC_SIZE];
1207	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1208	int ret = 0;
1209
1210	pagevec_init(&pvec);
1211	do {
1212		unsigned int nr_entries = PAGEVEC_SIZE;
1213
1214		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1215			nr_entries = *fs_pages_to_unuse;
1216
1217		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1218						  pvec.pages, indices,
1219						  type, frontswap);
1220		if (pvec.nr == 0) {
1221			ret = 0;
1222			break;
1223		}
1224
1225		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1226		if (ret < 0)
1227			break;
1228
1229		if (frontswap_partial) {
1230			*fs_pages_to_unuse -= ret;
1231			if (*fs_pages_to_unuse == 0) {
1232				ret = FRONTSWAP_PAGES_UNUSED;
1233				break;
1234			}
1235		}
1236
1237		start = indices[pvec.nr - 1];
1238	} while (true);
1239
1240	return ret;
1241}
1242
1243/*
1244 * Read all the shared memory data that resides in the swap
1245 * device 'type' back into memory, so the swap device can be
1246 * unused.
1247 */
1248int shmem_unuse(unsigned int type, bool frontswap,
1249		unsigned long *fs_pages_to_unuse)
1250{
1251	struct shmem_inode_info *info, *next;
1252	int error = 0;
1253
1254	if (list_empty(&shmem_swaplist))
1255		return 0;
1256
1257	mutex_lock(&shmem_swaplist_mutex);
1258	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1259		if (!info->swapped) {
1260			list_del_init(&info->swaplist);
1261			continue;
1262		}
1263		/*
1264		 * Drop the swaplist mutex while searching the inode for swap;
1265		 * but before doing so, make sure shmem_evict_inode() will not
1266		 * remove placeholder inode from swaplist, nor let it be freed
1267		 * (igrab() would protect from unlink, but not from unmount).
1268		 */
1269		atomic_inc(&info->stop_eviction);
1270		mutex_unlock(&shmem_swaplist_mutex);
1271
1272		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1273					  fs_pages_to_unuse);
1274		cond_resched();
1275
1276		mutex_lock(&shmem_swaplist_mutex);
1277		next = list_next_entry(info, swaplist);
1278		if (!info->swapped)
1279			list_del_init(&info->swaplist);
1280		if (atomic_dec_and_test(&info->stop_eviction))
1281			wake_up_var(&info->stop_eviction);
1282		if (error)
1283			break;
1284	}
1285	mutex_unlock(&shmem_swaplist_mutex);
1286
1287	return error;
1288}
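/*
 * Illustrative sketch (editor's example): shmem_unuse() runs on behalf
 * of swapoff(2).  The device path is an arbitrary example.
 */
#include <sys/swap.h>

static int swapoff_example(void)
{
	return swapoff("/dev/sdb1");	/* walks shmem_swaplist via shmem_unuse() */
}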
1289
1290/*
1291 * Move the page from the page cache to the swap cache.
1292 */
1293static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1294{
1295	struct shmem_inode_info *info;
1296	struct address_space *mapping;
1297	struct inode *inode;
1298	swp_entry_t swap;
1299	pgoff_t index;
1300
1301	VM_BUG_ON_PAGE(PageCompound(page), page);
1302	BUG_ON(!PageLocked(page));
1303	mapping = page->mapping;
1304	index = page->index;
1305	inode = mapping->host;
1306	info = SHMEM_I(inode);
1307	if (info->flags & VM_LOCKED)
1308		goto redirty;
1309	if (!total_swap_pages)
1310		goto redirty;
1311
1312	/*
1313	 * Our capabilities prevent regular writeback or sync from ever calling
1314	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1315	 * its underlying filesystem, in which case tmpfs should write out to
1316	 * swap only in response to memory pressure, and not for the writeback
1317	 * threads or sync.
1318	 */
1319	if (!wbc->for_reclaim) {
1320		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
1321		goto redirty;
1322	}
1323
1324	/*
1325	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1326	 * value into swapfile.c, the only way we can correctly account for a
1327	 * fallocated page arriving here is now to initialize it and write it.
1328	 *
1329	 * That's okay for a page already fallocated earlier, but if we have
1330	 * not yet completed the fallocation, then (a) we want to keep track
1331	 * of this page in case we have to undo it, and (b) it may not be a
1332	 * good idea to continue anyway, once we're pushing into swap.  So
1333	 * reactivate the page, and let shmem_fallocate() quit when too many.
1334	 */
1335	if (!PageUptodate(page)) {
1336		if (inode->i_private) {
1337			struct shmem_falloc *shmem_falloc;
1338			spin_lock(&inode->i_lock);
1339			shmem_falloc = inode->i_private;
1340			if (shmem_falloc &&
1341			    !shmem_falloc->waitq &&
1342			    index >= shmem_falloc->start &&
1343			    index < shmem_falloc->next)
1344				shmem_falloc->nr_unswapped++;
1345			else
1346				shmem_falloc = NULL;
1347			spin_unlock(&inode->i_lock);
1348			if (shmem_falloc)
1349				goto redirty;
1350		}
1351		clear_highpage(page);
1352		flush_dcache_page(page);
1353		SetPageUptodate(page);
1354	}
1355
1356	swap = get_swap_page(page);
1357	if (!swap.val)
1358		goto redirty;
1359
1360	/*
1361	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1362	 * if it's not already there.  Do it now before the page is
1363	 * moved to swap cache, when its pagelock no longer protects
1364	 * the inode from eviction.  But don't unlock the mutex until
1365	 * we've incremented swapped, because shmem_unuse_inode() will
1366	 * prune a !swapped inode from the swaplist under this mutex.
1367	 */
1368	mutex_lock(&shmem_swaplist_mutex);
1369	if (list_empty(&info->swaplist))
1370		list_add(&info->swaplist, &shmem_swaplist);
1371
1372	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1373		spin_lock_irq(&info->lock);
1374		shmem_recalc_inode(inode);
1375		info->swapped++;
1376		spin_unlock_irq(&info->lock);
1377
1378		swap_shmem_alloc(swap);
1379		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1380
1381		mutex_unlock(&shmem_swaplist_mutex);
1382		BUG_ON(page_mapped(page));
1383		swap_writepage(page, wbc);
1384		return 0;
1385	}
1386
1387	mutex_unlock(&shmem_swaplist_mutex);
1388	put_swap_page(page, swap);
1389redirty:
1390	set_page_dirty(page);
1391	if (wbc->for_reclaim)
1392		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1393	unlock_page(page);
1394	return 0;
1395}
1396
1397#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1398static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1399{
1400	char buffer[64];
1401
1402	if (!mpol || mpol->mode == MPOL_DEFAULT)
1403		return;		/* show nothing */
1404
1405	mpol_to_str(buffer, sizeof(buffer), mpol);
1406
1407	seq_printf(seq, ",mpol=%s", buffer);
1408}
1409
1410static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1411{
1412	struct mempolicy *mpol = NULL;
1413	if (sbinfo->mpol) {
1414		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1415		mpol = sbinfo->mpol;
1416		mpol_get(mpol);
1417		spin_unlock(&sbinfo->stat_lock);
1418	}
1419	return mpol;
1420}
1421#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1422static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1423{
1424}
1425static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1426{
1427	return NULL;
1428}
1429#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1430#ifndef CONFIG_NUMA
1431#define vm_policy vm_private_data
1432#endif
1433
1434static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1435		struct shmem_inode_info *info, pgoff_t index)
1436{
1437	/* Create a pseudo vma that just contains the policy */
1438	vma_init(vma, NULL);
1439	/* Bias interleave by inode number to distribute better across nodes */
1440	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1441	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1442}
1443
1444static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1445{
1446	/* Drop reference taken by mpol_shared_policy_lookup() */
1447	mpol_cond_put(vma->vm_policy);
1448}
1449
1450static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1451			struct shmem_inode_info *info, pgoff_t index)
1452{
1453	struct vm_area_struct pvma;
1454	struct page *page;
1455	struct vm_fault vmf;
1456
1457	shmem_pseudo_vma_init(&pvma, info, index);
1458	vmf.vma = &pvma;
1459	vmf.address = 0;
1460	page = swap_cluster_readahead(swap, gfp, &vmf);
1461	shmem_pseudo_vma_destroy(&pvma);
1462
1463	return page;
1464}
1465
1466static struct page *shmem_alloc_hugepage(gfp_t gfp,
1467		struct shmem_inode_info *info, pgoff_t index)
1468{
1469	struct vm_area_struct pvma;
1470	struct address_space *mapping = info->vfs_inode.i_mapping;
1471	pgoff_t hindex;
1472	struct page *page;
1473
1474	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1475		return NULL;
1476
1477	hindex = round_down(index, HPAGE_PMD_NR);
1478	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1479								XA_PRESENT))
1480		return NULL;
1481
1482	shmem_pseudo_vma_init(&pvma, info, hindex);
1483	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1484			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1485	shmem_pseudo_vma_destroy(&pvma);
1486	if (page)
1487		prep_transhuge_page(page);
1488	return page;
1489}
1490
1491static struct page *shmem_alloc_page(gfp_t gfp,
1492			struct shmem_inode_info *info, pgoff_t index)
1493{
1494	struct vm_area_struct pvma;
1495	struct page *page;
1496
1497	shmem_pseudo_vma_init(&pvma, info, index);
1498	page = alloc_page_vma(gfp, &pvma, 0);
1499	shmem_pseudo_vma_destroy(&pvma);
1500
1501	return page;
1502}
1503
1504static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1505		struct inode *inode,
1506		pgoff_t index, bool huge)
1507{
1508	struct shmem_inode_info *info = SHMEM_I(inode);
1509	struct page *page;
1510	int nr;
1511	int err = -ENOSPC;
1512
1513	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1514		huge = false;
1515	nr = huge ? HPAGE_PMD_NR : 1;
1516
1517	if (!shmem_inode_acct_block(inode, nr))
1518		goto failed;
1519
1520	if (huge)
1521		page = shmem_alloc_hugepage(gfp, info, index);
1522	else
1523		page = shmem_alloc_page(gfp, info, index);
1524	if (page) {
1525		__SetPageLocked(page);
1526		__SetPageSwapBacked(page);
1527		return page;
1528	}
1529
1530	err = -ENOMEM;
1531	shmem_inode_unacct_blocks(inode, nr);
1532failed:
1533	return ERR_PTR(err);
1534}
1535
1536/*
1537 * When a page is moved from swapcache to shmem filecache (either by the
1538 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1539 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1540 * ignorance of the mapping it belongs to.  If that mapping has special
1541 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1542 * we may need to copy to a suitable page before moving to filecache.
1543 *
1544 * In a future release, this may well be extended to respect cpuset and
1545 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1546 * but for now it is a simple matter of zone.
1547 */
1548static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1549{
1550	return page_zonenum(page) > gfp_zone(gfp);
1551}
1552
1553static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1554				struct shmem_inode_info *info, pgoff_t index)
1555{
1556	struct page *oldpage, *newpage;
1557	struct address_space *swap_mapping;
1558	swp_entry_t entry;
1559	pgoff_t swap_index;
1560	int error;
1561
1562	oldpage = *pagep;
1563	entry.val = page_private(oldpage);
1564	swap_index = swp_offset(entry);
1565	swap_mapping = page_mapping(oldpage);
1566
1567	/*
1568	 * We have arrived here because our zones are constrained, so don't
1569	 * limit chance of success by further cpuset and node constraints.
1570	 */
1571	gfp &= ~GFP_CONSTRAINT_MASK;
1572	newpage = shmem_alloc_page(gfp, info, index);
1573	if (!newpage)
1574		return -ENOMEM;
1575
1576	get_page(newpage);
1577	copy_highpage(newpage, oldpage);
1578	flush_dcache_page(newpage);
1579
1580	__SetPageLocked(newpage);
1581	__SetPageSwapBacked(newpage);
1582	SetPageUptodate(newpage);
1583	set_page_private(newpage, entry.val);
1584	SetPageSwapCache(newpage);
1585
1586	/*
1587	 * Our caller will very soon move newpage out of swapcache, but it's
1588	 * a nice clean interface for us to replace oldpage by newpage there.
1589	 */
1590	xa_lock_irq(&swap_mapping->i_pages);
1591	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1592	if (!error) {
1593		__inc_node_page_state(newpage, NR_FILE_PAGES);
1594		__dec_node_page_state(oldpage, NR_FILE_PAGES);
1595	}
1596	xa_unlock_irq(&swap_mapping->i_pages);
1597
1598	if (unlikely(error)) {
1599		/*
1600		 * Is this possible?  I think not, now that our callers check
1601		 * both PageSwapCache and page_private after getting page lock;
1602		 * but be defensive.  Reverse old to newpage for clear and free.
1603		 */
1604		oldpage = newpage;
1605	} else {
1606		mem_cgroup_migrate(oldpage, newpage);
1607		lru_cache_add_anon(newpage);
1608		*pagep = newpage;
1609	}
1610
1611	ClearPageSwapCache(oldpage);
1612	set_page_private(oldpage, 0);
1613
1614	unlock_page(oldpage);
1615	put_page(oldpage);
1616	put_page(oldpage);
1617	return error;
1618}
1619
1620/*
1621 * Swap in the page pointed to by *pagep.
1622 * Caller has to make sure that *pagep contains a valid swapped page.
1623 * Returns 0 and the page in *pagep on success. On failure, returns the
1624 * error code and NULL in *pagep.
1625 */
1626static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1627			     struct page **pagep, enum sgp_type sgp,
1628			     gfp_t gfp, struct vm_area_struct *vma,
1629			     vm_fault_t *fault_type)
1630{
1631	struct address_space *mapping = inode->i_mapping;
1632	struct shmem_inode_info *info = SHMEM_I(inode);
1633	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
1634	struct mem_cgroup *memcg;
1635	struct page *page;
1636	swp_entry_t swap;
1637	int error;
1638
1639	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1640	swap = radix_to_swp_entry(*pagep);
1641	*pagep = NULL;
1642
1643	/* Look it up and read it in.. */
1644	page = lookup_swap_cache(swap, NULL, 0);
1645	if (!page) {
1646		/* Or update major stats only when swapin succeeds?? */
1647		if (fault_type) {
1648			*fault_type |= VM_FAULT_MAJOR;
1649			count_vm_event(PGMAJFAULT);
1650			count_memcg_event_mm(charge_mm, PGMAJFAULT);
1651		}
1652		/* Here we actually start the io */
1653		page = shmem_swapin(swap, gfp, info, index);
1654		if (!page) {
1655			error = -ENOMEM;
1656			goto failed;
1657		}
1658	}
1659
1660	/* We have to do this with page locked to prevent races */
1661	lock_page(page);
1662	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1663	    !shmem_confirm_swap(mapping, index, swap)) {
1664		error = -EEXIST;
1665		goto unlock;
1666	}
1667	if (!PageUptodate(page)) {
1668		error = -EIO;
1669		goto failed;
1670	}
1671	wait_on_page_writeback(page);
1672
1673	if (shmem_should_replace_page(page, gfp)) {
1674		error = shmem_replace_page(&page, gfp, info, index);
1675		if (error)
1676			goto failed;
1677	}
1678
1679	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1680					    false);
1681	if (!error) {
1682		error = shmem_add_to_page_cache(page, mapping, index,
1683						swp_to_radix_entry(swap), gfp);
1684		/*
1685		 * We already confirmed swap under page lock, and make
1686		 * no memory allocation here, so usually no possibility
1687		 * of error; but free_swap_and_cache() only trylocks a
1688		 * page, so it is just possible that the entry has been
1689		 * truncated or holepunched since swap was confirmed.
1690		 * shmem_undo_range() will have done some of the
1691		 * unaccounting, now delete_from_swap_cache() will do
1692		 * the rest.
1693		 */
1694		if (error) {
1695			mem_cgroup_cancel_charge(page, memcg, false);
1696			delete_from_swap_cache(page);
1697		}
1698	}
1699	if (error)
1700		goto failed;
1701
1702	mem_cgroup_commit_charge(page, memcg, true, false);
1703
1704	spin_lock_irq(&info->lock);
1705	info->swapped--;
1706	shmem_recalc_inode(inode);
1707	spin_unlock_irq(&info->lock);
1708
1709	if (sgp == SGP_WRITE)
1710		mark_page_accessed(page);
1711
1712	delete_from_swap_cache(page);
1713	set_page_dirty(page);
1714	swap_free(swap);
1715
1716	*pagep = page;
1717	return 0;
1718failed:
1719	if (!shmem_confirm_swap(mapping, index, swap))
1720		error = -EEXIST;
1721unlock:
1722	if (page) {
1723		unlock_page(page);
1724		put_page(page);
1725	}
1726
1727	return error;
1728}
1729
1730/*
1731 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1732 *
1733 * If we allocate a new one we do not mark it dirty. That's up to the
1734 * vm. If we swap it in we mark it dirty since we also free the swap
1735 * entry since a page cannot live in both the swap and page cache.
1736 *
1737 * vmf and fault_type are only supplied by shmem_fault:
1738 * otherwise they are NULL.
1739 */
1740static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1741	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1742	struct vm_area_struct *vma, struct vm_fault *vmf,
1743			vm_fault_t *fault_type)
1744{
1745	struct address_space *mapping = inode->i_mapping;
1746	struct shmem_inode_info *info = SHMEM_I(inode);
1747	struct shmem_sb_info *sbinfo;
1748	struct mm_struct *charge_mm;
1749	struct mem_cgroup *memcg;
1750	struct page *page;
1751	enum sgp_type sgp_huge = sgp;
1752	pgoff_t hindex = index;
1753	int error;
1754	int once = 0;
1755	int alloced = 0;
1756
1757	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1758		return -EFBIG;
1759	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1760		sgp = SGP_CACHE;
1761repeat:
1762	if (sgp <= SGP_CACHE &&
1763	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1764		return -EINVAL;
1765	}
1766
1767	sbinfo = SHMEM_SB(inode->i_sb);
1768	charge_mm = vma ? vma->vm_mm : current->mm;
1769
1770	page = find_lock_entry(mapping, index);
1771	if (xa_is_value(page)) {
1772		error = shmem_swapin_page(inode, index, &page,
1773					  sgp, gfp, vma, fault_type);
1774		if (error == -EEXIST)
1775			goto repeat;
1776
1777		*pagep = page;
1778		return error;
1779	}
1780
1781	if (page && sgp == SGP_WRITE)
1782		mark_page_accessed(page);
1783
1784	/* fallocated page? */
1785	if (page && !PageUptodate(page)) {
1786		if (sgp != SGP_READ)
1787			goto clear;
1788		unlock_page(page);
1789		put_page(page);
1790		page = NULL;
1791	}
1792	if (page || sgp == SGP_READ) {
1793		*pagep = page;
1794		return 0;
1795	}
1796
1797	/*
1798	 * Fast cache lookup did not find it:
1799	 * bring it back from swap or allocate.
1800	 */
1801
1802	if (vma && userfaultfd_missing(vma)) {
1803		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1804		return 0;
1805	}
1806
1807	/* shmem_symlink() */
1808	if (mapping->a_ops != &shmem_aops)
1809		goto alloc_nohuge;
1810	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1811		goto alloc_nohuge;
1812	if (shmem_huge == SHMEM_HUGE_FORCE)
1813		goto alloc_huge;
1814	switch (sbinfo->huge) {
1815		loff_t i_size;
1816		pgoff_t off;
1817	case SHMEM_HUGE_NEVER:
1818		goto alloc_nohuge;
1819	case SHMEM_HUGE_WITHIN_SIZE:
1820		off = round_up(index, HPAGE_PMD_NR);
1821		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1822		if (i_size >= HPAGE_PMD_SIZE &&
1823		    i_size >> PAGE_SHIFT >= off)
1824			goto alloc_huge;
1825		/* fallthrough */
1826	case SHMEM_HUGE_ADVISE:
1827		if (sgp_huge == SGP_HUGE)
1828			goto alloc_huge;
1829		/* TODO: implement fadvise() hints */
1830		goto alloc_nohuge;
1831	}
1832
1833alloc_huge:
1834	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1835	if (IS_ERR(page)) {
1836alloc_nohuge:
1837		page = shmem_alloc_and_acct_page(gfp, inode,
1838						 index, false);
1839	}
1840	if (IS_ERR(page)) {
1841		int retry = 5;
1842
1843		error = PTR_ERR(page);
1844		page = NULL;
1845		if (error != -ENOSPC)
1846			goto unlock;
1847		/*
1848		 * Try to reclaim some space by splitting a huge page
1849		 * beyond i_size on the filesystem.
1850		 */
1851		while (retry--) {
1852			int ret;
1853
1854			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1855			if (ret == SHRINK_STOP)
1856				break;
1857			if (ret)
1858				goto alloc_nohuge;
1859		}
1860		goto unlock;
1861	}
1862
1863	if (PageTransHuge(page))
1864		hindex = round_down(index, HPAGE_PMD_NR);
1865	else
1866		hindex = index;
1867
1868	if (sgp == SGP_WRITE)
1869		__SetPageReferenced(page);
1870
1871	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1872					    PageTransHuge(page));
1873	if (error)
1874		goto unacct;
1875	error = shmem_add_to_page_cache(page, mapping, hindex,
1876					NULL, gfp & GFP_RECLAIM_MASK);
1877	if (error) {
1878		mem_cgroup_cancel_charge(page, memcg,
1879					 PageTransHuge(page));
1880		goto unacct;
1881	}
1882	mem_cgroup_commit_charge(page, memcg, false,
1883				 PageTransHuge(page));
1884	lru_cache_add_anon(page);
1885
1886	spin_lock_irq(&info->lock);
1887	info->alloced += compound_nr(page);
1888	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1889	shmem_recalc_inode(inode);
1890	spin_unlock_irq(&info->lock);
1891	alloced = true;
1892
1893	if (PageTransHuge(page) &&
1894	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1895			hindex + HPAGE_PMD_NR - 1) {
1896		/*
1897		 * Part of the huge page is beyond i_size: subject
1898		 * to shrink under memory pressure.
1899		 */
1900		spin_lock(&sbinfo->shrinklist_lock);
1901		/*
1902		 * list_empty_careful() defends against unlocked access
1903		 * to ->shrinklist in shmem_unused_huge_shrink()
1904		 */
1905		if (list_empty_careful(&info->shrinklist)) {
1906			list_add_tail(&info->shrinklist,
1907				      &sbinfo->shrinklist);
1908			sbinfo->shrinklist_len++;
1909		}
1910		spin_unlock(&sbinfo->shrinklist_lock);
1911	}
1912
1913	/*
1914	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1915	 */
1916	if (sgp == SGP_FALLOC)
1917		sgp = SGP_WRITE;
1918clear:
1919	/*
1920	 * Let SGP_WRITE caller clear ends if write does not fill page;
1921	 * but SGP_FALLOC on a page fallocated earlier must initialize
1922	 * it now, lest undo on failure cancel our earlier guarantee.
1923	 */
1924	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1925		struct page *head = compound_head(page);
1926		int i;
1927
1928		for (i = 0; i < compound_nr(head); i++) {
1929			clear_highpage(head + i);
1930			flush_dcache_page(head + i);
1931		}
1932		SetPageUptodate(head);
1933	}
1934
1935	/* Perhaps the file has been truncated since we checked */
1936	if (sgp <= SGP_CACHE &&
1937	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1938		if (alloced) {
1939			ClearPageDirty(page);
1940			delete_from_page_cache(page);
1941			spin_lock_irq(&info->lock);
1942			shmem_recalc_inode(inode);
1943			spin_unlock_irq(&info->lock);
1944		}
1945		error = -EINVAL;
1946		goto unlock;
1947	}
1948	*pagep = page + index - hindex;
1949	return 0;
1950
1951	/*
1952	 * Error recovery.
1953	 */
1954unacct:
1955	shmem_inode_unacct_blocks(inode, compound_nr(page));
1956
1957	if (PageTransHuge(page)) {
1958		unlock_page(page);
1959		put_page(page);
1960		goto alloc_nohuge;
1961	}
1962unlock:
1963	if (page) {
1964		unlock_page(page);
1965		put_page(page);
1966	}
1967	if (error == -ENOSPC && !once++) {
1968		spin_lock_irq(&info->lock);
1969		shmem_recalc_inode(inode);
1970		spin_unlock_irq(&info->lock);
1971		goto repeat;
1972	}
1973	if (error == -EEXIST)
1974		goto repeat;
1975	return error;
1976}
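/*
 * Illustration only, not part of this file: a minimal sketch of a kernel
 * caller pulling one page out of a tmpfs inode with shmem_getpage_gfp(),
 * similar in spirit to shmem_read_mapping_page_gfp().  The function name
 * below is made up for the example.
 */
#if 0
static struct page *shmem_example_get_page(struct inode *inode, pgoff_t index)
{
	struct page *page = NULL;
	int error;

	/* SGP_CACHE: find in cache, swap in, or allocate and clear */
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
				  mapping_gfp_mask(inode->i_mapping),
				  NULL, NULL, NULL);
	if (error)
		return ERR_PTR(error);
	unlock_page(page);	/* returned locked, with a reference held */
	return page;
}
#endif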
1977
1978/*
1979 * This is like autoremove_wake_function, but it removes the wait queue
1980 * entry unconditionally - even if something else had already woken the
1981 * target.
1982 */
1983static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1984{
1985	int ret = default_wake_function(wait, mode, sync, key);
1986	list_del_init(&wait->entry);
1987	return ret;
1988}
1989
1990static vm_fault_t shmem_fault(struct vm_fault *vmf)
1991{
1992	struct vm_area_struct *vma = vmf->vma;
1993	struct inode *inode = file_inode(vma->vm_file);
1994	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1995	enum sgp_type sgp;
1996	int err;
1997	vm_fault_t ret = VM_FAULT_LOCKED;
1998
1999	/*
2000	 * Trinity finds that probing a hole which tmpfs is punching can
2001	 * prevent the hole-punch from ever completing: which in turn
2002	 * locks writers out with its hold on i_mutex.  So refrain from
2003	 * faulting pages into the hole while it's being punched.  Although
2004	 * shmem_undo_range() does remove the additions, it may be unable to
2005	 * keep up, as each new page needs its own unmap_mapping_range() call,
2006	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2007	 *
2008	 * It does not matter if we sometimes reach this check just before the
2009	 * hole-punch begins, so that one fault then races with the punch:
2010	 * we just need to make racing faults a rare case.
2011	 *
2012	 * The implementation below would be much simpler if we just used a
2013	 * standard mutex or completion: but we cannot take i_mutex in fault,
2014	 * and bloating every shmem inode for this unlikely case would be sad.
2015	 */
2016	if (unlikely(inode->i_private)) {
2017		struct shmem_falloc *shmem_falloc;
2018
2019		spin_lock(&inode->i_lock);
2020		shmem_falloc = inode->i_private;
2021		if (shmem_falloc &&
2022		    shmem_falloc->waitq &&
2023		    vmf->pgoff >= shmem_falloc->start &&
2024		    vmf->pgoff < shmem_falloc->next) {
2025			wait_queue_head_t *shmem_falloc_waitq;
2026			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2027
2028			ret = VM_FAULT_NOPAGE;
2029			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
2030			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
2031				/* It's polite to up mmap_sem if we can */
2032				up_read(&vma->vm_mm->mmap_sem);
2033				ret = VM_FAULT_RETRY;
2034			}
2035
2036			shmem_falloc_waitq = shmem_falloc->waitq;
2037			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2038					TASK_UNINTERRUPTIBLE);
2039			spin_unlock(&inode->i_lock);
2040			schedule();
2041
2042			/*
2043			 * shmem_falloc_waitq points into the shmem_fallocate()
2044			 * stack of the hole-punching task: shmem_falloc_waitq
2045			 * is usually invalid by the time we reach here, but
2046			 * finish_wait() does not dereference it in that case;
2047			 * though i_lock needed lest racing with wake_up_all().
2048			 */
2049			spin_lock(&inode->i_lock);
2050			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2051			spin_unlock(&inode->i_lock);
2052			return ret;
2053		}
2054		spin_unlock(&inode->i_lock);
2055	}
2056
2057	sgp = SGP_CACHE;
2058
2059	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2060	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2061		sgp = SGP_NOHUGE;
2062	else if (vma->vm_flags & VM_HUGEPAGE)
2063		sgp = SGP_HUGE;
2064
2065	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2066				  gfp, vma, vmf, &ret);
2067	if (err)
2068		return vmf_error(err);
2069	return ret;
2070}
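/*
 * Illustration only, not part of this file: the race described above, seen
 * from userspace.  One thread keeps faulting pages back in while another
 * punches holes; the fault handler parks the faulting thread on
 * shmem_falloc->waitq instead of letting it starve the punch.  A sketch
 * assuming a tmpfs mount at /dev/shm; file name is made up.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>

#define SZ (64 << 20)

static char *map;

static void *toucher(void *arg)
{
	for (;;)			/* keep faulting pages back in */
		for (size_t i = 0; i < SZ; i += 4096)
			map[i] = 1;
	return NULL;
}

int main(void)
{
	int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);
	pthread_t t;

	ftruncate(fd, SZ);
	map = mmap(NULL, SZ, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	pthread_create(&t, NULL, toucher, NULL);
	for (int i = 0; i < 100; i++)	/* racing hole punches */
		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			  0, SZ);
	return 0;
}
#endif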
2071
2072unsigned long shmem_get_unmapped_area(struct file *file,
2073				      unsigned long uaddr, unsigned long len,
2074				      unsigned long pgoff, unsigned long flags)
2075{
2076	unsigned long (*get_area)(struct file *,
2077		unsigned long, unsigned long, unsigned long, unsigned long);
2078	unsigned long addr;
2079	unsigned long offset;
2080	unsigned long inflated_len;
2081	unsigned long inflated_addr;
2082	unsigned long inflated_offset;
2083
2084	if (len > TASK_SIZE)
2085		return -ENOMEM;
2086
2087	get_area = current->mm->get_unmapped_area;
2088	addr = get_area(file, uaddr, len, pgoff, flags);
2089
2090	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2091		return addr;
2092	if (IS_ERR_VALUE(addr))
2093		return addr;
2094	if (addr & ~PAGE_MASK)
2095		return addr;
2096	if (addr > TASK_SIZE - len)
2097		return addr;
2098
2099	if (shmem_huge == SHMEM_HUGE_DENY)
2100		return addr;
2101	if (len < HPAGE_PMD_SIZE)
2102		return addr;
2103	if (flags & MAP_FIXED)
2104		return addr;
2105	/*
2106	 * Our priority is to support MAP_SHARED mapped hugely;
2107	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2108	 * But if caller specified an address hint, respect that as before.
2109	 */
2110	if (uaddr)
2111		return addr;
2112
2113	if (shmem_huge != SHMEM_HUGE_FORCE) {
2114		struct super_block *sb;
2115
2116		if (file) {
2117			VM_BUG_ON(file->f_op != &shmem_file_operations);
2118			sb = file_inode(file)->i_sb;
2119		} else {
2120			/*
2121			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2122			 * for "/dev/zero", to create a shared anonymous object.
2123			 */
2124			if (IS_ERR(shm_mnt))
2125				return addr;
2126			sb = shm_mnt->mnt_sb;
2127		}
2128		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2129			return addr;
2130	}
2131
2132	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2133	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2134		return addr;
2135	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2136		return addr;
2137
2138	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2139	if (inflated_len > TASK_SIZE)
2140		return addr;
2141	if (inflated_len < len)
2142		return addr;
2143
2144	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2145	if (IS_ERR_VALUE(inflated_addr))
2146		return addr;
2147	if (inflated_addr & ~PAGE_MASK)
2148		return addr;
2149
2150	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2151	inflated_addr += offset - inflated_offset;
2152	if (inflated_offset > offset)
2153		inflated_addr += HPAGE_PMD_SIZE;
2154
2155	if (inflated_addr > TASK_SIZE - len)
2156		return addr;
2157	return inflated_addr;
2158}
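/*
 * Illustration only: the final alignment step above with concrete numbers,
 * assuming HPAGE_PMD_SIZE is 2MB (mask 0x1fffff) and a zero page offset.
 */
#if 0
static unsigned long example_align(void)
{
	unsigned long offset = 0;			/* pgoff 0 */
	unsigned long inflated_addr = 0x7f0000101000;	/* from get_area() */
	unsigned long inflated_offset;

	inflated_offset = inflated_addr & (0x200000 - 1);  /* 0x101000 */
	inflated_addr += offset - inflated_offset;	/* 0x7f0000000000 */
	if (inflated_offset > offset)	/* stepped back: move up one PMD */
		inflated_addr += 0x200000;		/* 0x7f0000200000 */
	return inflated_addr;	/* 2MB aligned, inside the inflated area */
}
#endif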
2159
2160#ifdef CONFIG_NUMA
2161static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2162{
2163	struct inode *inode = file_inode(vma->vm_file);
2164	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2165}
2166
2167static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2168					  unsigned long addr)
2169{
2170	struct inode *inode = file_inode(vma->vm_file);
2171	pgoff_t index;
2172
2173	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2174	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2175}
2176#endif
2177
2178int shmem_lock(struct file *file, int lock, struct user_struct *user)
2179{
2180	struct inode *inode = file_inode(file);
2181	struct shmem_inode_info *info = SHMEM_I(inode);
2182	int retval = -ENOMEM;
2183
2184	spin_lock_irq(&info->lock);
2185	if (lock && !(info->flags & VM_LOCKED)) {
2186		if (!user_shm_lock(inode->i_size, user))
2187			goto out_nomem;
2188		info->flags |= VM_LOCKED;
2189		mapping_set_unevictable(file->f_mapping);
2190	}
2191	if (!lock && (info->flags & VM_LOCKED) && user) {
2192		user_shm_unlock(inode->i_size, user);
2193		info->flags &= ~VM_LOCKED;
2194		mapping_clear_unevictable(file->f_mapping);
2195	}
2196	retval = 0;
2197
2198out_nomem:
2199	spin_unlock_irq(&info->lock);
2200	return retval;
2201}
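/*
 * Illustration only: shmem_lock() is reached from the SysV ipc layer.  A
 * userspace sketch; locking is subject to RLIMIT_MEMLOCK via
 * user_shm_lock().
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

int lock_segment(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0)
		return -1;
	/* pages become unevictable and are never written to swap */
	return shmctl(id, SHM_LOCK, NULL);
}
#endif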
2202
2203static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2204{
2205	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2206
2207	if (info->seals & F_SEAL_FUTURE_WRITE) {
2208		/*
2209		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2210		 * the "future write" seal is active.
2211		 */
2212		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2213			return -EPERM;
2214
2215		/*
2216		 * Since the F_SEAL_FUTURE_WRITE seal allows for a MAP_SHARED
2217		 * read-only mapping, take care to not allow mprotect to revert
2218		 * protections.
2219		 */
2220		vma->vm_flags &= ~(VM_MAYWRITE);
2221	}
2222
2223	file_accessed(file);
2224	vma->vm_ops = &shmem_vm_ops;
2225	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2226			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2227			(vma->vm_end & HPAGE_PMD_MASK)) {
2228		khugepaged_enter(vma, vma->vm_flags);
2229	}
2230	return 0;
2231}
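/*
 * Illustration only: the F_SEAL_FUTURE_WRITE checks above, from userspace.
 * A writable shared mapping is refused with EPERM, while a read-only
 * shared mapping still works (with VM_MAYWRITE cleared, so mprotect()
 * cannot make it writable later).
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int demo(void)
{
	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
	void *w, *r;

	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
	w = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	r = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	return w == MAP_FAILED && r != MAP_FAILED ? 0 : -1;
}
#endif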
2232
2233static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2234				     umode_t mode, dev_t dev, unsigned long flags)
2235{
2236	struct inode *inode;
2237	struct shmem_inode_info *info;
2238	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2239
2240	if (shmem_reserve_inode(sb))
2241		return NULL;
2242
2243	inode = new_inode(sb);
2244	if (inode) {
2245		inode->i_ino = get_next_ino();
2246		inode_init_owner(inode, dir, mode);
2247		inode->i_blocks = 0;
2248		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2249		inode->i_generation = prandom_u32();
2250		info = SHMEM_I(inode);
2251		memset(info, 0, (char *)inode - (char *)info);
2252		spin_lock_init(&info->lock);
2253		atomic_set(&info->stop_eviction, 0);
2254		info->seals = F_SEAL_SEAL;
2255		info->flags = flags & VM_NORESERVE;
2256		INIT_LIST_HEAD(&info->shrinklist);
2257		INIT_LIST_HEAD(&info->swaplist);
2258		simple_xattrs_init(&info->xattrs);
2259		cache_no_acl(inode);
2260
2261		switch (mode & S_IFMT) {
2262		default:
2263			inode->i_op = &shmem_special_inode_operations;
2264			init_special_inode(inode, mode, dev);
2265			break;
2266		case S_IFREG:
2267			inode->i_mapping->a_ops = &shmem_aops;
2268			inode->i_op = &shmem_inode_operations;
2269			inode->i_fop = &shmem_file_operations;
2270			mpol_shared_policy_init(&info->policy,
2271						 shmem_get_sbmpol(sbinfo));
2272			break;
2273		case S_IFDIR:
2274			inc_nlink(inode);
2275			/* Some things misbehave if size == 0 on a directory */
2276			inode->i_size = 2 * BOGO_DIRENT_SIZE;
2277			inode->i_op = &shmem_dir_inode_operations;
2278			inode->i_fop = &simple_dir_operations;
2279			break;
2280		case S_IFLNK:
2281			/*
2282			 * Must not load anything in the rbtree,
2283			 * mpol_free_shared_policy will not be called.
2284			 */
2285			mpol_shared_policy_init(&info->policy, NULL);
2286			break;
2287		}
2288
2289		lockdep_annotate_inode_mutex_key(inode);
2290	} else
2291		shmem_free_inode(sb);
2292	return inode;
2293}
2294
2295bool shmem_mapping(struct address_space *mapping)
2296{
2297	return mapping->a_ops == &shmem_aops;
2298}
2299
2300static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2301				  pmd_t *dst_pmd,
2302				  struct vm_area_struct *dst_vma,
2303				  unsigned long dst_addr,
2304				  unsigned long src_addr,
2305				  bool zeropage,
2306				  struct page **pagep)
2307{
2308	struct inode *inode = file_inode(dst_vma->vm_file);
2309	struct shmem_inode_info *info = SHMEM_I(inode);
2310	struct address_space *mapping = inode->i_mapping;
2311	gfp_t gfp = mapping_gfp_mask(mapping);
2312	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2313	struct mem_cgroup *memcg;
2314	spinlock_t *ptl;
2315	void *page_kaddr;
2316	struct page *page;
2317	pte_t _dst_pte, *dst_pte;
2318	int ret;
2319	pgoff_t offset, max_off;
2320
2321	ret = -ENOMEM;
2322	if (!shmem_inode_acct_block(inode, 1))
2323		goto out;
2324
2325	if (!*pagep) {
2326		page = shmem_alloc_page(gfp, info, pgoff);
2327		if (!page)
2328			goto out_unacct_blocks;
2329
2330		if (!zeropage) {	/* mcopy_atomic */
2331			page_kaddr = kmap_atomic(page);
2332			ret = copy_from_user(page_kaddr,
2333					     (const void __user *)src_addr,
2334					     PAGE_SIZE);
2335			kunmap_atomic(page_kaddr);
2336
2337			/* fallback to copy_from_user outside mmap_sem */
2338			if (unlikely(ret)) {
2339				*pagep = page;
2340				shmem_inode_unacct_blocks(inode, 1);
2341				/* don't free the page */
2342				return -ENOENT;
2343			}
2344		} else {		/* mfill_zeropage_atomic */
2345			clear_highpage(page);
2346		}
2347	} else {
2348		page = *pagep;
2349		*pagep = NULL;
2350	}
2351
2352	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2353	__SetPageLocked(page);
2354	__SetPageSwapBacked(page);
2355	__SetPageUptodate(page);
2356
2357	ret = -EFAULT;
2358	offset = linear_page_index(dst_vma, dst_addr);
2359	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2360	if (unlikely(offset >= max_off))
2361		goto out_release;
2362
2363	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
2364	if (ret)
2365		goto out_release;
2366
2367	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2368						gfp & GFP_RECLAIM_MASK);
2369	if (ret)
2370		goto out_release_uncharge;
2371
2372	mem_cgroup_commit_charge(page, memcg, false, false);
2373
2374	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2375	if (dst_vma->vm_flags & VM_WRITE)
2376		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2377	else {
2378		/*
2379		 * We don't set the pte dirty if the vma has no
2380		 * VM_WRITE permission, so mark the page dirty or it
2381		 * could be freed from under us. We could do it
2382		 * unconditionally before unlock_page(), but doing it
2383		 * only if VM_WRITE is not set is faster.
2384		 */
2385		set_page_dirty(page);
2386	}
2387
2388	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2389
2390	ret = -EFAULT;
2391	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2392	if (unlikely(offset >= max_off))
2393		goto out_release_uncharge_unlock;
2394
2395	ret = -EEXIST;
2396	if (!pte_none(*dst_pte))
2397		goto out_release_uncharge_unlock;
2398
2399	lru_cache_add_anon(page);
2400
2401	spin_lock(&info->lock);
2402	info->alloced++;
2403	inode->i_blocks += BLOCKS_PER_PAGE;
2404	shmem_recalc_inode(inode);
2405	spin_unlock(&info->lock);
2406
2407	inc_mm_counter(dst_mm, mm_counter_file(page));
2408	page_add_file_rmap(page, false);
2409	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2410
2411	/* No need to invalidate - it was non-present before */
2412	update_mmu_cache(dst_vma, dst_addr, dst_pte);
2413	pte_unmap_unlock(dst_pte, ptl);
2414	unlock_page(page);
2415	ret = 0;
2416out:
2417	return ret;
2418out_release_uncharge_unlock:
2419	pte_unmap_unlock(dst_pte, ptl);
2420	ClearPageDirty(page);
2421	delete_from_page_cache(page);
2422out_release_uncharge:
2423	mem_cgroup_cancel_charge(page, memcg, false);
2424out_release:
2425	unlock_page(page);
2426	put_page(page);
2427out_unacct_blocks:
2428	shmem_inode_unacct_blocks(inode, 1);
2429	goto out;
2430}
2431
2432int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2433			   pmd_t *dst_pmd,
2434			   struct vm_area_struct *dst_vma,
2435			   unsigned long dst_addr,
2436			   unsigned long src_addr,
2437			   struct page **pagep)
2438{
2439	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2440				      dst_addr, src_addr, false, pagep);
2441}
2442
2443int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2444			     pmd_t *dst_pmd,
2445			     struct vm_area_struct *dst_vma,
2446			     unsigned long dst_addr)
2447{
2448	struct page *page = NULL;
2449
2450	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2451				      dst_addr, 0, true, &page);
2452}
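/*
 * Illustration only: the usual userspace path into the two helpers above
 * is a UFFDIO_COPY (or UFFDIO_ZEROPAGE) ioctl resolving a missing-page
 * fault on a userfaultfd-registered shmem VMA.  A sketch; uffd, dst, src
 * and page_size are assumed to be set up by the caller.
 */
#if 0
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int resolve_fault(int uffd, unsigned long dst, void *src,
			 unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst = dst,		/* faulting address, page aligned */
		.src = (unsigned long)src,
		.len = page_size,
		.mode = 0,
	};

	return ioctl(uffd, UFFDIO_COPY, &copy);
}
#endif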
2453
2454#ifdef CONFIG_TMPFS
2455static const struct inode_operations shmem_symlink_inode_operations;
2456static const struct inode_operations shmem_short_symlink_operations;
2457
2458#ifdef CONFIG_TMPFS_XATTR
2459static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2460#else
2461#define shmem_initxattrs NULL
2462#endif
2463
2464static int
2465shmem_write_begin(struct file *file, struct address_space *mapping,
2466			loff_t pos, unsigned len, unsigned flags,
2467			struct page **pagep, void **fsdata)
2468{
2469	struct inode *inode = mapping->host;
2470	struct shmem_inode_info *info = SHMEM_I(inode);
2471	pgoff_t index = pos >> PAGE_SHIFT;
2472
2473	/* i_mutex is held by caller */
2474	if (unlikely(info->seals & (F_SEAL_GROW |
2475				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2476		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2477			return -EPERM;
2478		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2479			return -EPERM;
2480	}
2481
2482	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2483}
2484
2485static int
2486shmem_write_end(struct file *file, struct address_space *mapping,
2487			loff_t pos, unsigned len, unsigned copied,
2488			struct page *page, void *fsdata)
2489{
2490	struct inode *inode = mapping->host;
2491
2492	if (pos + copied > inode->i_size)
2493		i_size_write(inode, pos + copied);
2494
2495	if (!PageUptodate(page)) {
2496		struct page *head = compound_head(page);
2497		if (PageTransCompound(page)) {
2498			int i;
2499
2500			for (i = 0; i < HPAGE_PMD_NR; i++) {
2501				if (head + i == page)
2502					continue;
2503				clear_highpage(head + i);
2504				flush_dcache_page(head + i);
2505			}
2506		}
2507		if (copied < PAGE_SIZE) {
2508			unsigned from = pos & (PAGE_SIZE - 1);
2509			zero_user_segments(page, 0, from,
2510					from + copied, PAGE_SIZE);
2511		}
2512		SetPageUptodate(head);
2513	}
2514	set_page_dirty(page);
2515	unlock_page(page);
2516	put_page(page);
2517
2518	return copied;
2519}
2520
2521static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2522{
2523	struct file *file = iocb->ki_filp;
2524	struct inode *inode = file_inode(file);
2525	struct address_space *mapping = inode->i_mapping;
2526	pgoff_t index;
2527	unsigned long offset;
2528	enum sgp_type sgp = SGP_READ;
2529	int error = 0;
2530	ssize_t retval = 0;
2531	loff_t *ppos = &iocb->ki_pos;
2532
2533	/*
2534	 * Might this read be for a stacking filesystem?  Then when reading
2535	 * holes of a sparse file, we actually need to allocate those pages,
2536	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2537	 */
2538	if (!iter_is_iovec(to))
2539		sgp = SGP_CACHE;
2540
2541	index = *ppos >> PAGE_SHIFT;
2542	offset = *ppos & ~PAGE_MASK;
2543
2544	for (;;) {
2545		struct page *page = NULL;
2546		pgoff_t end_index;
2547		unsigned long nr, ret;
2548		loff_t i_size = i_size_read(inode);
2549
2550		end_index = i_size >> PAGE_SHIFT;
2551		if (index > end_index)
2552			break;
2553		if (index == end_index) {
2554			nr = i_size & ~PAGE_MASK;
2555			if (nr <= offset)
2556				break;
2557		}
2558
2559		error = shmem_getpage(inode, index, &page, sgp);
2560		if (error) {
2561			if (error == -EINVAL)
2562				error = 0;
2563			break;
2564		}
2565		if (page) {
2566			if (sgp == SGP_CACHE)
2567				set_page_dirty(page);
2568			unlock_page(page);
2569		}
2570
2571		/*
2572		 * We must evaluate after, since reads (unlike writes)
2573		 * are called without i_mutex protection against truncate
2574		 */
2575		nr = PAGE_SIZE;
2576		i_size = i_size_read(inode);
2577		end_index = i_size >> PAGE_SHIFT;
2578		if (index == end_index) {
2579			nr = i_size & ~PAGE_MASK;
2580			if (nr <= offset) {
2581				if (page)
2582					put_page(page);
2583				break;
2584			}
2585		}
2586		nr -= offset;
2587
2588		if (page) {
2589			/*
2590			 * If users can be writing to this page using arbitrary
2591			 * virtual addresses, take care about potential aliasing
2592			 * before reading the page on the kernel side.
2593			 */
2594			if (mapping_writably_mapped(mapping))
2595				flush_dcache_page(page);
2596			/*
2597			 * Mark the page accessed if we read the beginning.
2598			 */
2599			if (!offset)
2600				mark_page_accessed(page);
2601		} else {
2602			page = ZERO_PAGE(0);
2603			get_page(page);
2604		}
2605
2606		/*
2607		 * Ok, we have the page, and it's up-to-date, so
2608		 * now we can copy it to user space...
2609		 */
2610		ret = copy_page_to_iter(page, offset, nr, to);
2611		retval += ret;
2612		offset += ret;
2613		index += offset >> PAGE_SHIFT;
2614		offset &= ~PAGE_MASK;
2615
2616		put_page(page);
2617		if (!iov_iter_count(to))
2618			break;
2619		if (ret < nr) {
2620			error = -EFAULT;
2621			break;
2622		}
2623		cond_resched();
2624	}
2625
2626	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2627	file_accessed(file);
2628	return retval ? retval : error;
2629}
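/*
 * Illustration only: an ordinary iovec read uses SGP_READ above, so holes
 * come back as ZERO_PAGE data and nothing is allocated.  A userspace
 * sketch assuming a tmpfs mount at /dev/shm; file name is made up.
 */
#if 0
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

long read_hole(void)
{
	char buf[4096];
	struct stat st;
	int fd = open("/dev/shm/sparse", O_RDWR | O_CREAT, 0600);

	ftruncate(fd, 1 << 20);		/* all hole, nothing allocated */
	read(fd, buf, sizeof(buf));	/* returns zeroes */
	fstat(fd, &st);
	return st.st_blocks;		/* still 0: the hole stayed a hole */
}
#endif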
2630
2631/*
2632 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2633 */
2634static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2635				    pgoff_t index, pgoff_t end, int whence)
2636{
2637	struct page *page;
2638	struct pagevec pvec;
2639	pgoff_t indices[PAGEVEC_SIZE];
2640	bool done = false;
2641	int i;
2642
2643	pagevec_init(&pvec);
2644	pvec.nr = 1;		/* start small: we may be there already */
2645	while (!done) {
2646		pvec.nr = find_get_entries(mapping, index,
2647					pvec.nr, pvec.pages, indices);
2648		if (!pvec.nr) {
2649			if (whence == SEEK_DATA)
2650				index = end;
2651			break;
2652		}
2653		for (i = 0; i < pvec.nr; i++, index++) {
2654			if (index < indices[i]) {
2655				if (whence == SEEK_HOLE) {
2656					done = true;
2657					break;
2658				}
2659				index = indices[i];
2660			}
2661			page = pvec.pages[i];
2662			if (page && !xa_is_value(page)) {
2663				if (!PageUptodate(page))
2664					page = NULL;
2665			}
2666			if (index >= end ||
2667			    (page && whence == SEEK_DATA) ||
2668			    (!page && whence == SEEK_HOLE)) {
2669				done = true;
2670				break;
2671			}
2672		}
2673		pagevec_remove_exceptionals(&pvec);
2674		pagevec_release(&pvec);
2675		pvec.nr = PAGEVEC_SIZE;
2676		cond_resched();
2677	}
2678	return index;
2679}
2680
2681static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2682{
2683	struct address_space *mapping = file->f_mapping;
2684	struct inode *inode = mapping->host;
2685	pgoff_t start, end;
2686	loff_t new_offset;
2687
2688	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2689		return generic_file_llseek_size(file, offset, whence,
2690					MAX_LFS_FILESIZE, i_size_read(inode));
2691	inode_lock(inode);
2692	/* We're holding i_mutex so we can access i_size directly */
2693
2694	if (offset < 0 || offset >= inode->i_size)
2695		offset = -ENXIO;
2696	else {
2697		start = offset >> PAGE_SHIFT;
2698		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2699		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2700		new_offset <<= PAGE_SHIFT;
2701		if (new_offset > offset) {
2702			if (new_offset < inode->i_size)
2703				offset = new_offset;
2704			else if (whence == SEEK_DATA)
2705				offset = -ENXIO;
2706			else
2707				offset = inode->i_size;
2708		}
2709	}
2710
2711	if (offset >= 0)
2712		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2713	inode_unlock(inode);
2714	return offset;
2715}
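/*
 * Illustration only: enumerating the data extents of a sparse tmpfs file
 * with the SEEK_DATA/SEEK_HOLE support above.  lseek() fails with ENXIO
 * once only hole remains before EOF, which ends the loop.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void dump_extents(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);	/* end of extent */
		printf("data: %lld..%lld\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
}
#endif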
2716
2717static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2718							 loff_t len)
2719{
2720	struct inode *inode = file_inode(file);
2721	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2722	struct shmem_inode_info *info = SHMEM_I(inode);
2723	struct shmem_falloc shmem_falloc;
2724	pgoff_t start, index, end;
2725	int error;
2726
2727	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2728		return -EOPNOTSUPP;
2729
2730	inode_lock(inode);
2731
2732	if (mode & FALLOC_FL_PUNCH_HOLE) {
2733		struct address_space *mapping = file->f_mapping;
2734		loff_t unmap_start = round_up(offset, PAGE_SIZE);
2735		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2736		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2737
2738		/* protected by i_mutex */
2739		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2740			error = -EPERM;
2741			goto out;
2742		}
2743
2744		shmem_falloc.waitq = &shmem_falloc_waitq;
2745		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2746		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2747		spin_lock(&inode->i_lock);
2748		inode->i_private = &shmem_falloc;
2749		spin_unlock(&inode->i_lock);
2750
2751		if ((u64)unmap_end > (u64)unmap_start)
2752			unmap_mapping_range(mapping, unmap_start,
2753					    1 + unmap_end - unmap_start, 0);
2754		shmem_truncate_range(inode, offset, offset + len - 1);
2755		/* No need to unmap again: hole-punching leaves COWed pages */
2756
2757		spin_lock(&inode->i_lock);
2758		inode->i_private = NULL;
2759		wake_up_all(&shmem_falloc_waitq);
2760		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2761		spin_unlock(&inode->i_lock);
2762		error = 0;
2763		goto out;
2764	}
2765
2766	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2767	error = inode_newsize_ok(inode, offset + len);
2768	if (error)
2769		goto out;
2770
2771	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2772		error = -EPERM;
2773		goto out;
2774	}
2775
2776	start = offset >> PAGE_SHIFT;
2777	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2778	/* Try to avoid a swapstorm if len is impossible to satisfy */
2779	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2780		error = -ENOSPC;
2781		goto out;
2782	}
2783
2784	shmem_falloc.waitq = NULL;
2785	shmem_falloc.start = start;
2786	shmem_falloc.next  = start;
2787	shmem_falloc.nr_falloced = 0;
2788	shmem_falloc.nr_unswapped = 0;
2789	spin_lock(&inode->i_lock);
2790	inode->i_private = &shmem_falloc;
2791	spin_unlock(&inode->i_lock);
2792
2793	for (index = start; index < end; index++) {
2794		struct page *page;
2795
2796		/*
2797		 * Good, the fallocate(2) manpage permits EINTR: we may have
2798		 * been interrupted because we are using up too much memory.
2799		 */
2800		if (signal_pending(current))
2801			error = -EINTR;
2802		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2803			error = -ENOMEM;
2804		else
2805			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2806		if (error) {
2807			/* Remove the !PageUptodate pages we added */
2808			if (index > start) {
2809				shmem_undo_range(inode,
2810				    (loff_t)start << PAGE_SHIFT,
2811				    ((loff_t)index << PAGE_SHIFT) - 1, true);
2812			}
2813			goto undone;
2814		}
2815
2816		/*
2817		 * Inform shmem_writepage() how far we have reached.
2818		 * No need for lock or barrier: we have the page lock.
2819		 */
2820		shmem_falloc.next++;
2821		if (!PageUptodate(page))
2822			shmem_falloc.nr_falloced++;
2823
2824		/*
2825		 * If !PageUptodate, leave it that way so that freeable pages
2826		 * can be recognized if we need to rollback on error later.
2827		 * But set_page_dirty so that memory pressure will swap rather
2828		 * than free the pages we are allocating (and SGP_CACHE pages
2829		 * might still be clean: we now need to mark those dirty too).
2830		 */
2831		set_page_dirty(page);
2832		unlock_page(page);
2833		put_page(page);
2834		cond_resched();
2835	}
2836
2837	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2838		i_size_write(inode, offset + len);
2839	inode->i_ctime = current_time(inode);
2840undone:
2841	spin_lock(&inode->i_lock);
2842	inode->i_private = NULL;
2843	spin_unlock(&inode->i_lock);
2844out:
2845	inode_unlock(inode);
2846	return error;
2847}
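/*
 * Illustration only: the preallocation path above from userspace.
 * FALLOC_FL_KEEP_SIZE allocates pages without moving i_size.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int prealloc(int fd)
{
	/* allocate the first 8MB now and grow i_size to match */
	if (fallocate(fd, 0, 0, 8 << 20))
		return -1;
	/* allocate 8MB more, leaving i_size at 8MB */
	return fallocate(fd, FALLOC_FL_KEEP_SIZE, 8 << 20, 8 << 20);
}
#endif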
2848
2849static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2850{
2851	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2852
2853	buf->f_type = TMPFS_MAGIC;
2854	buf->f_bsize = PAGE_SIZE;
2855	buf->f_namelen = NAME_MAX;
2856	if (sbinfo->max_blocks) {
2857		buf->f_blocks = sbinfo->max_blocks;
2858		buf->f_bavail =
2859		buf->f_bfree  = sbinfo->max_blocks -
2860				percpu_counter_sum(&sbinfo->used_blocks);
2861	}
2862	if (sbinfo->max_inodes) {
2863		buf->f_files = sbinfo->max_inodes;
2864		buf->f_ffree = sbinfo->free_inodes;
2865	}
2866	/* else leave those fields 0 like simple_statfs */
2867	return 0;
2868}
2869
2870/*
2871 * File creation. Allocate an inode, and we're done.
2872 */
2873static int
2874shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2875{
2876	struct inode *inode;
2877	int error = -ENOSPC;
2878
2879	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2880	if (inode) {
2881		error = simple_acl_create(dir, inode);
2882		if (error)
2883			goto out_iput;
2884		error = security_inode_init_security(inode, dir,
2885						     &dentry->d_name,
2886						     shmem_initxattrs, NULL);
2887		if (error && error != -EOPNOTSUPP)
2888			goto out_iput;
2889
2890		error = 0;
2891		dir->i_size += BOGO_DIRENT_SIZE;
2892		dir->i_ctime = dir->i_mtime = current_time(dir);
2893		d_instantiate(dentry, inode);
2894		dget(dentry); /* Extra count - pin the dentry in core */
2895	}
2896	return error;
2897out_iput:
2898	iput(inode);
2899	return error;
2900}
2901
2902static int
2903shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2904{
2905	struct inode *inode;
2906	int error = -ENOSPC;
2907
2908	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2909	if (inode) {
2910		error = security_inode_init_security(inode, dir,
2911						     NULL,
2912						     shmem_initxattrs, NULL);
2913		if (error && error != -EOPNOTSUPP)
2914			goto out_iput;
2915		error = simple_acl_create(dir, inode);
2916		if (error)
2917			goto out_iput;
2918		d_tmpfile(dentry, inode);
2919	}
2920	return error;
2921out_iput:
2922	iput(inode);
2923	return error;
2924}
2925
2926static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2927{
2928	int error;
2929
2930	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2931		return error;
2932	inc_nlink(dir);
2933	return 0;
2934}
2935
2936static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2937		bool excl)
2938{
2939	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2940}
2941
2942/*
2943 * Link a file.
2944 */
2945static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2946{
2947	struct inode *inode = d_inode(old_dentry);
2948	int ret = 0;
2949
2950	/*
2951	 * No ordinary (disk based) filesystem counts links as inodes;
2952	 * but each new link needs a new dentry, pinning lowmem, and
2953	 * tmpfs dentries cannot be pruned until they are unlinked.
2954	 * But if an O_TMPFILE file is linked into the tmpfs, the
2955	 * first link must skip that, to get the accounting right.
2956	 */
2957	if (inode->i_nlink) {
2958		ret = shmem_reserve_inode(inode->i_sb);
2959		if (ret)
2960			goto out;
2961	}
2962
2963	dir->i_size += BOGO_DIRENT_SIZE;
2964	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2965	inc_nlink(inode);
2966	ihold(inode);	/* New dentry reference */
2967	dget(dentry);		/* Extra pinning count for the created dentry */
2968	d_instantiate(dentry, inode);
2969out:
2970	return ret;
2971}
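/*
 * Illustration only: the O_TMPFILE case mentioned above.  The unnamed
 * file's inode was accounted at creation, so with i_nlink == 0 its first
 * link skips the extra reserve.  A userspace sketch; paths are made up.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int materialize(void)
{
	char path[64];
	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0)
		return -1;
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	return linkat(AT_FDCWD, path, AT_FDCWD, "/dev/shm/named",
		      AT_SYMLINK_FOLLOW);
}
#endif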
2972
2973static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2974{
2975	struct inode *inode = d_inode(dentry);
2976
2977	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2978		shmem_free_inode(inode->i_sb);
2979
2980	dir->i_size -= BOGO_DIRENT_SIZE;
2981	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2982	drop_nlink(inode);
2983	dput(dentry);	/* Undo the count from "create" - this does all the work */
2984	return 0;
2985}
2986
2987static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2988{
2989	if (!simple_empty(dentry))
2990		return -ENOTEMPTY;
2991
2992	drop_nlink(d_inode(dentry));
2993	drop_nlink(dir);
2994	return shmem_unlink(dir, dentry);
2995}
2996
2997static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2998{
2999	bool old_is_dir = d_is_dir(old_dentry);
3000	bool new_is_dir = d_is_dir(new_dentry);
3001
3002	if (old_dir != new_dir && old_is_dir != new_is_dir) {
3003		if (old_is_dir) {
3004			drop_nlink(old_dir);
3005			inc_nlink(new_dir);
3006		} else {
3007			drop_nlink(new_dir);
3008			inc_nlink(old_dir);
3009		}
3010	}
3011	old_dir->i_ctime = old_dir->i_mtime =
3012	new_dir->i_ctime = new_dir->i_mtime =
3013	d_inode(old_dentry)->i_ctime =
3014	d_inode(new_dentry)->i_ctime = current_time(old_dir);
3015
3016	return 0;
3017}
3018
3019static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3020{
3021	struct dentry *whiteout;
3022	int error;
3023
3024	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3025	if (!whiteout)
3026		return -ENOMEM;
3027
3028	error = shmem_mknod(old_dir, whiteout,
3029			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3030	dput(whiteout);
3031	if (error)
3032		return error;
3033
3034	/*
3035	 * Cheat and hash the whiteout while the old dentry is still in
3036	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3037	 *
3038	 * d_lookup() will consistently find one of them at this point,
3039	 * not sure which one, but that isn't even important.
3040	 */
3041	d_rehash(whiteout);
3042	return 0;
3043}
3044
3045/*
3046 * The VFS layer already does all the dentry stuff for rename,
3047 * we just have to decrement the usage count for the target if
3048 * it exists so that the VFS layer correctly frees it when it
3049 * gets overwritten.
3050 */
3051static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3052{
3053	struct inode *inode = d_inode(old_dentry);
3054	int they_are_dirs = S_ISDIR(inode->i_mode);
3055
3056	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3057		return -EINVAL;
3058
3059	if (flags & RENAME_EXCHANGE)
3060		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3061
3062	if (!simple_empty(new_dentry))
3063		return -ENOTEMPTY;
3064
3065	if (flags & RENAME_WHITEOUT) {
3066		int error;
3067
3068		error = shmem_whiteout(old_dir, old_dentry);
3069		if (error)
3070			return error;
3071	}
3072
3073	if (d_really_is_positive(new_dentry)) {
3074		(void) shmem_unlink(new_dir, new_dentry);
3075		if (they_are_dirs) {
3076			drop_nlink(d_inode(new_dentry));
3077			drop_nlink(old_dir);
3078		}
3079	} else if (they_are_dirs) {
3080		drop_nlink(old_dir);
3081		inc_nlink(new_dir);
3082	}
3083
3084	old_dir->i_size -= BOGO_DIRENT_SIZE;
3085	new_dir->i_size += BOGO_DIRENT_SIZE;
3086	old_dir->i_ctime = old_dir->i_mtime =
3087	new_dir->i_ctime = new_dir->i_mtime =
3088	inode->i_ctime = current_time(old_dir);
3089	return 0;
3090}
3091
3092static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3093{
3094	int error;
3095	int len;
3096	struct inode *inode;
3097	struct page *page;
3098
3099	len = strlen(symname) + 1;
3100	if (len > PAGE_SIZE)
3101		return -ENAMETOOLONG;
3102
3103	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3104				VM_NORESERVE);
3105	if (!inode)
3106		return -ENOSPC;
3107
3108	error = security_inode_init_security(inode, dir, &dentry->d_name,
3109					     shmem_initxattrs, NULL);
3110	if (error) {
3111		if (error != -EOPNOTSUPP) {
3112			iput(inode);
3113			return error;
3114		}
3115		error = 0;
3116	}
3117
3118	inode->i_size = len-1;
3119	if (len <= SHORT_SYMLINK_LEN) {
3120		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3121		if (!inode->i_link) {
3122			iput(inode);
3123			return -ENOMEM;
3124		}
3125		inode->i_op = &shmem_short_symlink_operations;
3126	} else {
3127		inode_nohighmem(inode);
3128		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3129		if (error) {
3130			iput(inode);
3131			return error;
3132		}
3133		inode->i_mapping->a_ops = &shmem_aops;
3134		inode->i_op = &shmem_symlink_inode_operations;
3135		memcpy(page_address(page), symname, len);
3136		SetPageUptodate(page);
3137		set_page_dirty(page);
3138		unlock_page(page);
3139		put_page(page);
3140	}
3141	dir->i_size += BOGO_DIRENT_SIZE;
3142	dir->i_ctime = dir->i_mtime = current_time(dir);
3143	d_instantiate(dentry, inode);
3144	dget(dentry);
3145	return 0;
3146}
3147
3148static void shmem_put_link(void *arg)
3149{
3150	mark_page_accessed(arg);
3151	put_page(arg);
3152}
3153
3154static const char *shmem_get_link(struct dentry *dentry,
3155				  struct inode *inode,
3156				  struct delayed_call *done)
3157{
3158	struct page *page = NULL;
3159	int error;
3160	if (!dentry) {
3161		page = find_get_page(inode->i_mapping, 0);
3162		if (!page)
3163			return ERR_PTR(-ECHILD);
3164		if (!PageUptodate(page)) {
3165			put_page(page);
3166			return ERR_PTR(-ECHILD);
3167		}
3168	} else {
3169		error = shmem_getpage(inode, 0, &page, SGP_READ);
3170		if (error)
3171			return ERR_PTR(error);
3172		unlock_page(page);
3173	}
3174	set_delayed_call(done, shmem_put_link, page);
3175	return page_address(page);
3176}
3177
3178#ifdef CONFIG_TMPFS_XATTR
3179/*
3180 * Superblocks without xattr inode operations may get some security.* xattr
3181 * support from the LSM "for free". As soon as we have any other xattrs
3182 * like ACLs, we also need to implement the security.* handlers at
3183 * filesystem level, though.
3184 */
3185
3186/*
3187 * Callback for security_inode_init_security() for acquiring xattrs.
3188 */
3189static int shmem_initxattrs(struct inode *inode,
3190			    const struct xattr *xattr_array,
3191			    void *fs_info)
3192{
3193	struct shmem_inode_info *info = SHMEM_I(inode);
3194	const struct xattr *xattr;
3195	struct simple_xattr *new_xattr;
3196	size_t len;
3197
3198	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3199		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3200		if (!new_xattr)
3201			return -ENOMEM;
3202
3203		len = strlen(xattr->name) + 1;
3204		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3205					  GFP_KERNEL);
3206		if (!new_xattr->name) {
3207			kfree(new_xattr);
3208			return -ENOMEM;
3209		}
3210
3211		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3212		       XATTR_SECURITY_PREFIX_LEN);
3213		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3214		       xattr->name, len);
3215
3216		simple_xattr_list_add(&info->xattrs, new_xattr);
3217	}
3218
3219	return 0;
3220}
3221
3222static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3223				   struct dentry *unused, struct inode *inode,
3224				   const char *name, void *buffer, size_t size)
3225{
3226	struct shmem_inode_info *info = SHMEM_I(inode);
3227
3228	name = xattr_full_name(handler, name);
3229	return simple_xattr_get(&info->xattrs, name, buffer, size);
3230}
3231
3232static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3233				   struct dentry *unused, struct inode *inode,
3234				   const char *name, const void *value,
3235				   size_t size, int flags)
3236{
3237	struct shmem_inode_info *info = SHMEM_I(inode);
3238
3239	name = xattr_full_name(handler, name);
3240	return simple_xattr_set(&info->xattrs, name, value, size, flags);
3241}
3242
3243static const struct xattr_handler shmem_security_xattr_handler = {
3244	.prefix = XATTR_SECURITY_PREFIX,
3245	.get = shmem_xattr_handler_get,
3246	.set = shmem_xattr_handler_set,
3247};
3248
3249static const struct xattr_handler shmem_trusted_xattr_handler = {
3250	.prefix = XATTR_TRUSTED_PREFIX,
3251	.get = shmem_xattr_handler_get,
3252	.set = shmem_xattr_handler_set,
3253};
3254
3255static const struct xattr_handler *shmem_xattr_handlers[] = {
3256#ifdef CONFIG_TMPFS_POSIX_ACL
3257	&posix_acl_access_xattr_handler,
3258	&posix_acl_default_xattr_handler,
3259#endif
3260	&shmem_security_xattr_handler,
3261	&shmem_trusted_xattr_handler,
3262	NULL
3263};
3264
3265static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3266{
3267	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3268	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3269}
3270#endif /* CONFIG_TMPFS_XATTR */
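/*
 * Illustration only: the trusted.* handler above in action from userspace
 * (trusted.* names require CAP_SYS_ADMIN; the attribute name is made up).
 */
#if 0
#include <sys/xattr.h>

int tag_file(const char *path)
{
	return setxattr(path, "trusted.example", "v", 1, 0);
}
#endif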
3271
3272static const struct inode_operations shmem_short_symlink_operations = {
3273	.get_link	= simple_get_link,
3274#ifdef CONFIG_TMPFS_XATTR
3275	.listxattr	= shmem_listxattr,
3276#endif
3277};
3278
3279static const struct inode_operations shmem_symlink_inode_operations = {
3280	.get_link	= shmem_get_link,
3281#ifdef CONFIG_TMPFS_XATTR
3282	.listxattr	= shmem_listxattr,
3283#endif
3284};
3285
3286static struct dentry *shmem_get_parent(struct dentry *child)
3287{
3288	return ERR_PTR(-ESTALE);
3289}
3290
3291static int shmem_match(struct inode *ino, void *vfh)
3292{
3293	__u32 *fh = vfh;
3294	__u64 inum = fh[2];
3295	inum = (inum << 32) | fh[1];
3296	return ino->i_ino == inum && fh[0] == ino->i_generation;
3297}
3298
3299/* Find any alias of inode, but prefer a hashed alias */
3300static struct dentry *shmem_find_alias(struct inode *inode)
3301{
3302	struct dentry *alias = d_find_alias(inode);
3303
3304	return alias ?: d_find_any_alias(inode);
3305}
3306
3307
3308static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3309		struct fid *fid, int fh_len, int fh_type)
3310{
3311	struct inode *inode;
3312	struct dentry *dentry = NULL;
3313	u64 inum;
3314
3315	if (fh_len < 3)
3316		return NULL;
3317
3318	inum = fid->raw[2];
3319	inum = (inum << 32) | fid->raw[1];
3320
3321	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3322			shmem_match, fid->raw);
3323	if (inode) {
3324		dentry = shmem_find_alias(inode);
3325		iput(inode);
3326	}
3327
3328	return dentry;
3329}
3330
3331static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3332				struct inode *parent)
3333{
3334	if (*len < 3) {
3335		*len = 3;
3336		return FILEID_INVALID;
3337	}
3338
3339	if (inode_unhashed(inode)) {
3340		/* Unfortunately insert_inode_hash is not idempotent,
3341		 * so as we hash inodes here rather than at creation
3342		 * time, we need a lock to ensure we only try
3343		 * to do it once
3344		 */
3345		static DEFINE_SPINLOCK(lock);
3346		spin_lock(&lock);
3347		if (inode_unhashed(inode))
3348			__insert_inode_hash(inode,
3349					    inode->i_ino + inode->i_generation);
3350		spin_unlock(&lock);
3351	}
3352
3353	fh[0] = inode->i_generation;
3354	fh[1] = inode->i_ino;
3355	fh[2] = ((__u64)inode->i_ino) >> 32;
3356
3357	*len = 3;
3358	return 1;
3359}
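/*
 * Illustration only: the 3-word handle built above can be round-tripped
 * from userspace; open_by_handle_at() needs CAP_DAC_READ_SEARCH.  A
 * sketch without error handling; the function name is made up.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

int reopen_by_handle(int mount_fd, const char *path)
{
	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	int mount_id;

	fh->handle_bytes = MAX_HANDLE_SZ;
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0))
		return -1;
	return open_by_handle_at(mount_fd, fh, O_RDONLY);
}
#endif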
3360
3361static const struct export_operations shmem_export_ops = {
3362	.get_parent     = shmem_get_parent,
3363	.encode_fh      = shmem_encode_fh,
3364	.fh_to_dentry	= shmem_fh_to_dentry,
3365};
3366
3367enum shmem_param {
3368	Opt_gid,
3369	Opt_huge,
3370	Opt_mode,
3371	Opt_mpol,
3372	Opt_nr_blocks,
3373	Opt_nr_inodes,
3374	Opt_size,
3375	Opt_uid,
3376};
3377
3378static const struct fs_parameter_spec shmem_param_specs[] = {
3379	fsparam_u32   ("gid",		Opt_gid),
3380	fsparam_enum  ("huge",		Opt_huge),
3381	fsparam_u32oct("mode",		Opt_mode),
3382	fsparam_string("mpol",		Opt_mpol),
3383	fsparam_string("nr_blocks",	Opt_nr_blocks),
3384	fsparam_string("nr_inodes",	Opt_nr_inodes),
3385	fsparam_string("size",		Opt_size),
3386	fsparam_u32   ("uid",		Opt_uid),
3387	{}
3388};
3389
3390static const struct fs_parameter_enum shmem_param_enums[] = {
3391	{ Opt_huge,	"never",	SHMEM_HUGE_NEVER },
3392	{ Opt_huge,	"always",	SHMEM_HUGE_ALWAYS },
3393	{ Opt_huge,	"within_size",	SHMEM_HUGE_WITHIN_SIZE },
3394	{ Opt_huge,	"advise",	SHMEM_HUGE_ADVISE },
3395	{}
3396};
3397
3398const struct fs_parameter_description shmem_fs_parameters = {
3399	.name		= "tmpfs",
3400	.specs		= shmem_param_specs,
3401	.enums		= shmem_param_enums,
3402};
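/*
 * Illustration only: the parameters declared above as they appear in a
 * mount request.  "size" and "nr_blocks"/"nr_inodes" take k/m/g suffixes
 * via memparse(), and "size" additionally accepts a trailing %.
 */
#if 0
#include <sys/mount.h>

int mount_tmpfs(void)
{
	return mount("tmpfs", "/mnt/t", "tmpfs", 0,
		     "size=50%,nr_inodes=100k,mode=1777,huge=within_size");
}
#endif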
3403
3404static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3405{
3406	struct shmem_options *ctx = fc->fs_private;
3407	struct fs_parse_result result;
3408	unsigned long long size;
3409	char *rest;
3410	int opt;
3411
3412	opt = fs_parse(fc, &shmem_fs_parameters, param, &result);
3413	if (opt < 0)
3414		return opt;
3415
3416	switch (opt) {
3417	case Opt_size:
3418		size = memparse(param->string, &rest);
3419		if (*rest == '%') {
3420			size <<= PAGE_SHIFT;
3421			size *= totalram_pages();
3422			do_div(size, 100);
3423			rest++;
3424		}
3425		if (*rest)
3426			goto bad_value;
3427		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3428		ctx->seen |= SHMEM_SEEN_BLOCKS;
3429		break;
3430	case Opt_nr_blocks:
3431		ctx->blocks = memparse(param->string, &rest);
3432		if (*rest)
3433			goto bad_value;
3434		ctx->seen |= SHMEM_SEEN_BLOCKS;
3435		break;
3436	case Opt_nr_inodes:
3437		ctx->inodes = memparse(param->string, &rest);
3438		if (*rest)
3439			goto bad_value;
3440		ctx->seen |= SHMEM_SEEN_INODES;
3441		break;
3442	case Opt_mode:
3443		ctx->mode = result.uint_32 & 07777;
3444		break;
3445	case Opt_uid:
3446		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3447		if (!uid_valid(ctx->uid))
3448			goto bad_value;
3449		break;
3450	case Opt_gid:
3451		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3452		if (!gid_valid(ctx->gid))
3453			goto bad_value;
3454		break;
3455	case Opt_huge:
3456		ctx->huge = result.uint_32;
3457		if (ctx->huge != SHMEM_HUGE_NEVER &&
3458		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
3459		      has_transparent_hugepage()))
3460			goto unsupported_parameter;
3461		ctx->seen |= SHMEM_SEEN_HUGE;
3462		break;
3463	case Opt_mpol:
3464		if (IS_ENABLED(CONFIG_NUMA)) {
3465			mpol_put(ctx->mpol);
3466			ctx->mpol = NULL;
3467			if (mpol_parse_str(param->string, &ctx->mpol))
3468				goto bad_value;
3469			break;
3470		}
3471		goto unsupported_parameter;
3472	}
3473	return 0;
3474
3475unsupported_parameter:
3476	return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key);
3477bad_value:
3478	return invalf(fc, "tmpfs: Bad value for '%s'", param->key);
3479}
3480
3481static int shmem_parse_options(struct fs_context *fc, void *data)
3482{
3483	char *options = data;
3484
3485	if (options) {
3486		int err = security_sb_eat_lsm_opts(options, &fc->security);
3487		if (err)
3488			return err;
3489	}
3490
3491	while (options != NULL) {
3492		char *this_char = options;
3493		for (;;) {
3494			/*
3495			 * NUL-terminate this option: unfortunately,
3496			 * mount options form a comma-separated list,
3497			 * but mpol's nodelist may also contain commas.
3498			 */
3499			options = strchr(options, ',');
3500			if (options == NULL)
3501				break;
3502			options++;
3503			if (!isdigit(*options)) {
3504				options[-1] = '\0';
3505				break;
3506			}
3507		}
3508		if (*this_char) {
3509			char *value = strchr(this_char,'=');
3510			size_t len = 0;
3511			int err;
3512
3513			if (value) {
3514				*value++ = '\0';
3515				len = strlen(value);
3516			}
3517			err = vfs_parse_fs_string(fc, this_char, value, len);
3518			if (err < 0)
3519				return err;
3520		}
3521	}
3522	return 0;
3523}
3524
3525/*
3526 * Reconfigure a shmem filesystem.
3527 *
3528 * Note that we disallow change from limited->unlimited blocks/inodes while any
3529 * are in use; but we must separately disallow unlimited->limited, because in
3530 * that case we have no record of how much is already in use.
3531 */
3532static int shmem_reconfigure(struct fs_context *fc)
3533{
3534	struct shmem_options *ctx = fc->fs_private;
3535	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3536	unsigned long inodes;
3537	const char *err;
3538
3539	spin_lock(&sbinfo->stat_lock);
3540	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3541	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3542		if (!sbinfo->max_blocks) {
3543			err = "Cannot retroactively limit size";
3544			goto out;
3545		}
3546		if (percpu_counter_compare(&sbinfo->used_blocks,
3547					   ctx->blocks) > 0) {
3548			err = "Too small a size for current use";
3549			goto out;
3550		}
3551	}
3552	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3553		if (!sbinfo->max_inodes) {
3554			err = "Cannot retroactively limit inodes";
3555			goto out;
3556		}
3557		if (ctx->inodes < inodes) {
3558			err = "Too few inodes for current use";
3559			goto out;
3560		}
3561	}
3562
3563	if (ctx->seen & SHMEM_SEEN_HUGE)
3564		sbinfo->huge = ctx->huge;
3565	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3566		sbinfo->max_blocks  = ctx->blocks;
3567	if (ctx->seen & SHMEM_SEEN_INODES) {
3568		sbinfo->max_inodes  = ctx->inodes;
3569		sbinfo->free_inodes = ctx->inodes - inodes;
3570	}
3571
3572	/*
3573	 * Preserve previous mempolicy unless mpol remount option was specified.
3574	 */
3575	if (ctx->mpol) {
3576		mpol_put(sbinfo->mpol);
3577		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3578		ctx->mpol = NULL;
3579	}
3580	spin_unlock(&sbinfo->stat_lock);
3581	return 0;
3582out:
3583	spin_unlock(&sbinfo->stat_lock);
3584	return invalf(fc, "tmpfs: %s", err);
3585}
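/*
 * Illustration only: resizing a live tmpfs.  Shrinking below current
 * usage, or switching between limited and unlimited, is refused by the
 * checks above.  Mount point is made up.
 */
#if 0
#include <sys/mount.h>

int grow_tmpfs(void)
{
	return mount(NULL, "/mnt/t", NULL, MS_REMOUNT, "size=2g");
}
#endif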
3586
3587static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3588{
3589	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3590
3591	if (sbinfo->max_blocks != shmem_default_max_blocks())
3592		seq_printf(seq, ",size=%luk",
3593			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3594	if (sbinfo->max_inodes != shmem_default_max_inodes())
3595		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3596	if (sbinfo->mode != (0777 | S_ISVTX))
3597		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3598	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3599		seq_printf(seq, ",uid=%u",
3600				from_kuid_munged(&init_user_ns, sbinfo->uid));
3601	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3602		seq_printf(seq, ",gid=%u",
3603				from_kgid_munged(&init_user_ns, sbinfo->gid));
3604#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3605	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3606	if (sbinfo->huge)
3607		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3608#endif
3609	shmem_show_mpol(seq, sbinfo->mpol);
3610	return 0;
3611}
3612
3613#endif /* CONFIG_TMPFS */
3614
3615static void shmem_put_super(struct super_block *sb)
3616{
3617	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3618
3619	percpu_counter_destroy(&sbinfo->used_blocks);
3620	mpol_put(sbinfo->mpol);
3621	kfree(sbinfo);
3622	sb->s_fs_info = NULL;
3623}
3624
3625static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3626{
3627	struct shmem_options *ctx = fc->fs_private;
3628	struct inode *inode;
3629	struct shmem_sb_info *sbinfo;
3630	int err = -ENOMEM;
3631
3632	/* Round up to L1_CACHE_BYTES to resist false sharing */
3633	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3634				L1_CACHE_BYTES), GFP_KERNEL);
3635	if (!sbinfo)
3636		return -ENOMEM;
3637
3638	sb->s_fs_info = sbinfo;
3639
3640#ifdef CONFIG_TMPFS
3641	/*
3642	 * By default we only allow half of the physical ram per
3643	 * tmpfs instance, limiting inodes to one per page of lowmem;
3644	 * but the internal instance is left unlimited.
3645	 */
3646	if (!(sb->s_flags & SB_KERNMOUNT)) {
3647		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3648			ctx->blocks = shmem_default_max_blocks();
3649		if (!(ctx->seen & SHMEM_SEEN_INODES))
3650			ctx->inodes = shmem_default_max_inodes();
3651	} else {
3652		sb->s_flags |= SB_NOUSER;
3653	}
3654	sb->s_export_op = &shmem_export_ops;
3655	sb->s_flags |= SB_NOSEC;
3656#else
3657	sb->s_flags |= SB_NOUSER;
3658#endif
3659	sbinfo->max_blocks = ctx->blocks;
3660	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3661	sbinfo->uid = ctx->uid;
3662	sbinfo->gid = ctx->gid;
3663	sbinfo->mode = ctx->mode;
3664	sbinfo->huge = ctx->huge;
3665	sbinfo->mpol = ctx->mpol;
3666	ctx->mpol = NULL;
3667
3668	spin_lock_init(&sbinfo->stat_lock);
3669	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3670		goto failed;
3671	spin_lock_init(&sbinfo->shrinklist_lock);
3672	INIT_LIST_HEAD(&sbinfo->shrinklist);
3673
3674	sb->s_maxbytes = MAX_LFS_FILESIZE;
3675	sb->s_blocksize = PAGE_SIZE;
3676	sb->s_blocksize_bits = PAGE_SHIFT;
3677	sb->s_magic = TMPFS_MAGIC;
3678	sb->s_op = &shmem_ops;
3679	sb->s_time_gran = 1;
3680#ifdef CONFIG_TMPFS_XATTR
3681	sb->s_xattr = shmem_xattr_handlers;
3682#endif
3683#ifdef CONFIG_TMPFS_POSIX_ACL
3684	sb->s_flags |= SB_POSIXACL;
3685#endif
3686	uuid_gen(&sb->s_uuid);
3687
3688	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3689	if (!inode)
3690		goto failed;
3691	inode->i_uid = sbinfo->uid;
3692	inode->i_gid = sbinfo->gid;
3693	sb->s_root = d_make_root(inode);
3694	if (!sb->s_root)
3695		goto failed;
3696	return 0;
3697
3698failed:
3699	shmem_put_super(sb);
3700	return err;
3701}
3702
3703static int shmem_get_tree(struct fs_context *fc)
3704{
3705	return get_tree_nodev(fc, shmem_fill_super);
3706}
3707
3708static void shmem_free_fc(struct fs_context *fc)
3709{
3710	struct shmem_options *ctx = fc->fs_private;
3711
3712	if (ctx) {
3713		mpol_put(ctx->mpol);
3714		kfree(ctx);
3715	}
3716}
3717
3718static const struct fs_context_operations shmem_fs_context_ops = {
3719	.free			= shmem_free_fc,
3720	.get_tree		= shmem_get_tree,
3721#ifdef CONFIG_TMPFS
3722	.parse_monolithic	= shmem_parse_options,
3723	.parse_param		= shmem_parse_one,
3724	.reconfigure		= shmem_reconfigure,
3725#endif
3726};
3727
3728static struct kmem_cache *shmem_inode_cachep;
3729
3730static struct inode *shmem_alloc_inode(struct super_block *sb)
3731{
3732	struct shmem_inode_info *info;
3733	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3734	if (!info)
3735		return NULL;
3736	return &info->vfs_inode;
3737}
3738
3739static void shmem_free_in_core_inode(struct inode *inode)
3740{
3741	if (S_ISLNK(inode->i_mode))
3742		kfree(inode->i_link);
3743	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3744}
3745
3746static void shmem_destroy_inode(struct inode *inode)
3747{
3748	if (S_ISREG(inode->i_mode))
3749		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3750}
3751
3752static void shmem_init_inode(void *foo)
3753{
3754	struct shmem_inode_info *info = foo;
3755	inode_init_once(&info->vfs_inode);
3756}
3757
3758static void shmem_init_inodecache(void)
3759{
3760	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3761				sizeof(struct shmem_inode_info),
3762				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3763}
3764
3765static void shmem_destroy_inodecache(void)
3766{
3767	kmem_cache_destroy(shmem_inode_cachep);
3768}
3769
3770static const struct address_space_operations shmem_aops = {
3771	.writepage	= shmem_writepage,
3772	.set_page_dirty	= __set_page_dirty_no_writeback,
3773#ifdef CONFIG_TMPFS
3774	.write_begin	= shmem_write_begin,
3775	.write_end	= shmem_write_end,
3776#endif
3777#ifdef CONFIG_MIGRATION
3778	.migratepage	= migrate_page,
3779#endif
3780	.error_remove_page = generic_error_remove_page,
3781};
3782
3783static const struct file_operations shmem_file_operations = {
3784	.mmap		= shmem_mmap,
3785	.get_unmapped_area = shmem_get_unmapped_area,
3786#ifdef CONFIG_TMPFS
3787	.llseek		= shmem_file_llseek,
3788	.read_iter	= shmem_file_read_iter,
3789	.write_iter	= generic_file_write_iter,
3790	.fsync		= noop_fsync,
3791	.splice_read	= generic_file_splice_read,
3792	.splice_write	= iter_file_splice_write,
3793	.fallocate	= shmem_fallocate,
3794#endif
3795};
3796
3797static const struct inode_operations shmem_inode_operations = {
3798	.getattr	= shmem_getattr,
3799	.setattr	= shmem_setattr,
3800#ifdef CONFIG_TMPFS_XATTR
3801	.listxattr	= shmem_listxattr,
3802	.set_acl	= simple_set_acl,
3803#endif
3804};
3805
3806static const struct inode_operations shmem_dir_inode_operations = {
3807#ifdef CONFIG_TMPFS
3808	.create		= shmem_create,
3809	.lookup		= simple_lookup,
3810	.link		= shmem_link,
3811	.unlink		= shmem_unlink,
3812	.symlink	= shmem_symlink,
3813	.mkdir		= shmem_mkdir,
3814	.rmdir		= shmem_rmdir,
3815	.mknod		= shmem_mknod,
3816	.rename		= shmem_rename2,
3817	.tmpfile	= shmem_tmpfile,
3818#endif
3819#ifdef CONFIG_TMPFS_XATTR
3820	.listxattr	= shmem_listxattr,
3821#endif
3822#ifdef CONFIG_TMPFS_POSIX_ACL
3823	.setattr	= shmem_setattr,
3824	.set_acl	= simple_set_acl,
3825#endif
3826};
3827
3828static const struct inode_operations shmem_special_inode_operations = {
3829#ifdef CONFIG_TMPFS_XATTR
3830	.listxattr	= shmem_listxattr,
3831#endif
3832#ifdef CONFIG_TMPFS_POSIX_ACL
3833	.setattr	= shmem_setattr,
3834	.set_acl	= simple_set_acl,
3835#endif
3836};
3837
3838static const struct super_operations shmem_ops = {
3839	.alloc_inode	= shmem_alloc_inode,
3840	.free_inode	= shmem_free_in_core_inode,
3841	.destroy_inode	= shmem_destroy_inode,
3842#ifdef CONFIG_TMPFS
3843	.statfs		= shmem_statfs,
3844	.show_options	= shmem_show_options,
3845#endif
3846	.evict_inode	= shmem_evict_inode,
3847	.drop_inode	= generic_delete_inode,
3848	.put_super	= shmem_put_super,
3849#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3850	.nr_cached_objects	= shmem_unused_huge_count,
3851	.free_cached_objects	= shmem_unused_huge_scan,
3852#endif
3853};
3854
3855static const struct vm_operations_struct shmem_vm_ops = {
3856	.fault		= shmem_fault,
3857	.map_pages	= filemap_map_pages,
3858#ifdef CONFIG_NUMA
3859	.set_policy     = shmem_set_policy,
3860	.get_policy     = shmem_get_policy,
3861#endif
3862};
3863
3864int shmem_init_fs_context(struct fs_context *fc)
3865{
3866	struct shmem_options *ctx;
3867
3868	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3869	if (!ctx)
3870		return -ENOMEM;
3871
3872	ctx->mode = 0777 | S_ISVTX;
3873	ctx->uid = current_fsuid();
3874	ctx->gid = current_fsgid();
3875
3876	fc->fs_private = ctx;
3877	fc->ops = &shmem_fs_context_ops;
3878	return 0;
3879}
3880
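/*
 * [Editorial sketch, not part of the original file] shmem_init_fs_context()
 * is what fsopen("tmpfs", ...) reaches via the new mount API.  A hedged
 * userspace sketch, assuming kernel >= 5.2 headers and raw syscall(2) since
 * glibc does not wrap these calls:
 */
#if 0	/* illustrative userspace code */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "tmpfs", 0);
	if (fsfd < 0) { perror("fsopen"); return 1; }
	/* each string option lands in shmem_parse_one() */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "64m", 0);
	/* triggers shmem_get_tree() -> shmem_fill_super() */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0) { perror("fsmount"); return 1; }
	/* move_mount(2) would attach mntfd into the mount namespace */
	return 0;
}
#endif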
3881static struct file_system_type shmem_fs_type = {
3882	.owner		= THIS_MODULE,
3883	.name		= "tmpfs",
3884	.init_fs_context = shmem_init_fs_context,
3885#ifdef CONFIG_TMPFS
3886	.parameters	= &shmem_fs_parameters,
3887#endif
3888	.kill_sb	= kill_litter_super,
3889	.fs_flags	= FS_USERNS_MOUNT,
3890};
3891
3892int __init shmem_init(void)
3893{
3894	int error;
3895
3896	shmem_init_inodecache();
3897
3898	error = register_filesystem(&shmem_fs_type);
3899	if (error) {
3900		pr_err("Could not register tmpfs\n");
3901		goto out2;
3902	}
3903
3904	shm_mnt = kern_mount(&shmem_fs_type);
3905	if (IS_ERR(shm_mnt)) {
3906		error = PTR_ERR(shm_mnt);
3907		pr_err("Could not kern_mount tmpfs\n");
3908		goto out1;
3909	}
3910
3911#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3912	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3913		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3914	else
3915		shmem_huge = 0; /* just in case it was patched */
3916#endif
3917	return 0;
3918
3919out1:
3920	unregister_filesystem(&shmem_fs_type);
3921out2:
3922	shmem_destroy_inodecache();
3923	shm_mnt = ERR_PTR(error);
3924	return error;
3925}
3926
3927#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
3928static ssize_t shmem_enabled_show(struct kobject *kobj,
3929		struct kobj_attribute *attr, char *buf)
3930{
3931	int values[] = {
3932		SHMEM_HUGE_ALWAYS,
3933		SHMEM_HUGE_WITHIN_SIZE,
3934		SHMEM_HUGE_ADVISE,
3935		SHMEM_HUGE_NEVER,
3936		SHMEM_HUGE_DENY,
3937		SHMEM_HUGE_FORCE,
3938	};
3939	int i, count;
3940
3941	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3942		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3943
3944		count += sprintf(buf + count, fmt,
3945				shmem_format_huge(values[i]));
3946	}
3947	buf[count - 1] = '\n';
3948	return count;
3949}
3950
3951static ssize_t shmem_enabled_store(struct kobject *kobj,
3952		struct kobj_attribute *attr, const char *buf, size_t count)
3953{
3954	char tmp[16];
3955	int huge;
3956
3957	if (count + 1 > sizeof(tmp))
3958		return -EINVAL;
3959	memcpy(tmp, buf, count);
3960	tmp[count] = '\0';
3961	if (count && tmp[count - 1] == '\n')
3962		tmp[count - 1] = '\0';
3963
3964	huge = shmem_parse_huge(tmp);
3965	if (huge == -EINVAL)
3966		return -EINVAL;
3967	if (!has_transparent_hugepage() &&
3968			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3969		return -EINVAL;
3970
3971	shmem_huge = huge;
3972	if (shmem_huge > SHMEM_HUGE_DENY)
3973		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3974	return count;
3975}
3976
3977struct kobj_attribute shmem_enabled_attr =
3978	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3979#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
3980
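/*
 * [Editorial sketch, not part of the original file] the sysfs attribute
 * defined above is driven from userspace like this; the token written must
 * be one of the strings accepted by shmem_parse_huge():
 */
#if 0	/* illustrative userspace code */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled", "w");

	if (!f) { perror("fopen"); return 1; }
	fputs("within_size\n", f);	/* handled by shmem_enabled_store() */
	return fclose(f) ? 1 : 0;
}
#endif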
3981#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3982bool shmem_huge_enabled(struct vm_area_struct *vma)
3983{
3984	struct inode *inode = file_inode(vma->vm_file);
3985	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3986	loff_t i_size;
3987	pgoff_t off;
3988
3989	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
3990	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
3991		return false;
3992	if (shmem_huge == SHMEM_HUGE_FORCE)
3993		return true;
3994	if (shmem_huge == SHMEM_HUGE_DENY)
3995		return false;
3996	switch (sbinfo->huge) {
3997		case SHMEM_HUGE_NEVER:
3998			return false;
3999		case SHMEM_HUGE_ALWAYS:
4000			return true;
4001		case SHMEM_HUGE_WITHIN_SIZE:
4002			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4003			i_size = round_up(i_size_read(inode), PAGE_SIZE);
4004			if (i_size >= HPAGE_PMD_SIZE &&
4005					i_size >> PAGE_SHIFT >= off)
4006				return true;
4007			/* fall through */
4008		case SHMEM_HUGE_ADVISE:
4009			/* TODO: implement fadvise() hints */
4010			return (vma->vm_flags & VM_HUGEPAGE);
4011		default:
4012			VM_BUG_ON(1);
4013			return false;
4014	}
4015}
4016#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
4017
4018#else /* !CONFIG_SHMEM */
4019
4020/*
4021 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4022 *
4023 * This is intended for small systems where the benefits of the full
4024 * shmem code (swap-backed and resource-limited) are outweighed by
4025 * their complexity. On systems without swap this code should be
4026 * effectively equivalent, but much lighter weight.
4027 */
4028
4029static struct file_system_type shmem_fs_type = {
4030	.name		= "tmpfs",
4031	.init_fs_context = ramfs_init_fs_context,
4032	.parameters	= &ramfs_fs_parameters,
4033	.kill_sb	= kill_litter_super,
4034	.fs_flags	= FS_USERNS_MOUNT,
4035};
4036
4037int __init shmem_init(void)
4038{
4039	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4040
4041	shm_mnt = kern_mount(&shmem_fs_type);
4042	BUG_ON(IS_ERR(shm_mnt));
4043
4044	return 0;
4045}
4046
4047int shmem_unuse(unsigned int type, bool frontswap,
4048		unsigned long *fs_pages_to_unuse)
4049{
4050	return 0;
4051}
4052
4053int shmem_lock(struct file *file, int lock, struct user_struct *user)
4054{
4055	return 0;
4056}
4057
4058void shmem_unlock_mapping(struct address_space *mapping)
4059{
4060}
4061
4062#ifdef CONFIG_MMU
4063unsigned long shmem_get_unmapped_area(struct file *file,
4064				      unsigned long addr, unsigned long len,
4065				      unsigned long pgoff, unsigned long flags)
4066{
4067	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4068}
4069#endif
4070
4071void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4072{
4073	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4074}
4075EXPORT_SYMBOL_GPL(shmem_truncate_range);
4076
4077#define shmem_vm_ops				generic_file_vm_ops
4078#define shmem_file_operations			ramfs_file_operations
4079#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
4080#define shmem_acct_size(flags, size)		0
4081#define shmem_unacct_size(flags, size)		do {} while (0)
4082
4083#endif /* CONFIG_SHMEM */
4084
4085/* common code */
4086
4087static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4088				       unsigned long flags, unsigned int i_flags)
4089{
4090	struct inode *inode;
4091	struct file *res;
4092
4093	if (IS_ERR(mnt))
4094		return ERR_CAST(mnt);
4095
4096	if (size < 0 || size > MAX_LFS_FILESIZE)
4097		return ERR_PTR(-EINVAL);
4098
4099	if (shmem_acct_size(flags, size))
4100		return ERR_PTR(-ENOMEM);
4101
4102	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4103				flags);
4104	if (unlikely(!inode)) {
4105		shmem_unacct_size(flags, size);
4106		return ERR_PTR(-ENOSPC);
4107	}
4108	inode->i_flags |= i_flags;
4109	inode->i_size = size;
4110	clear_nlink(inode);	/* It is unlinked */
4111	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4112	if (!IS_ERR(res))
4113		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4114				&shmem_file_operations);
4115	if (IS_ERR(res))
4116		iput(inode);
4117	return res;
4118}
4119
4120/**
4121 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4122 * 	kernel internal.  There will be NO LSM permission checks against the
4123 * 	underlying inode.  So users of this interface must do LSM checks at a
4124 *	higher layer.  The users are the big_key and shm implementations.  LSM
4125 *	checks are provided at the key or shm level rather than the inode.
4126 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4127 * @size: size to be set for the file
4128 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4129 */
4130struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4131{
4132	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4133}
4134
4135/**
4136 * shmem_file_setup - get an unlinked file living in tmpfs
4137 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4138 * @size: size to be set for the file
4139 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4140 */
4141struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4142{
4143	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4144}
4145EXPORT_SYMBOL_GPL(shmem_file_setup);
4146
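/*
 * [Editorial sketch, not part of the original file] the userspace cousin of
 * shmem_file_setup() is memfd_create(2), which also yields an unlinked
 * tmpfs file (glibc >= 2.27 provides the wrapper):
 */
#if 0	/* illustrative userspace code */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	int fd = memfd_create("example", MFD_CLOEXEC);

	if (fd < 0) { perror("memfd_create"); return 1; }
	if (ftruncate(fd, 4096) != 0) { perror("ftruncate"); return 1; }
	close(fd);		/* last reference: the file simply vanishes */
	return 0;
}
#endif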
4147/**
4148 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4149 * @mnt: the tmpfs mount where the file will be created
4150 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4151 * @size: size to be set for the file
4152 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4153 */
4154struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4155				       loff_t size, unsigned long flags)
4156{
4157	return __shmem_file_setup(mnt, name, size, flags, 0);
4158}
4159EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4160
4161/**
4162 * shmem_zero_setup - setup a shared anonymous mapping
4163 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
4164 */
4165int shmem_zero_setup(struct vm_area_struct *vma)
4166{
4167	struct file *file;
4168	loff_t size = vma->vm_end - vma->vm_start;
4169
4170	/*
4171	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
4172	 * between XFS directory reading and selinux: since this file is only
4173	 * accessible to the user through its mapping, use S_PRIVATE flag to
4174	 * bypass file security, in the same way as shmem_kernel_file_setup().
4175	 */
4176	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4177	if (IS_ERR(file))
4178		return PTR_ERR(file);
4179
4180	if (vma->vm_file)
4181		fput(vma->vm_file);
4182	vma->vm_file = file;
4183	vma->vm_ops = &shmem_vm_ops;
4184
4185	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4186			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4187			(vma->vm_end & HPAGE_PMD_MASK)) {
4188		khugepaged_enter(vma, vma->vm_flags);
4189	}
4190
4191	return 0;
4192}
4193
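/*
 * [Editorial sketch, not part of the original file] shmem_zero_setup() is
 * reached from do_mmap() for shared anonymous mappings, i.e. the common
 * userspace pattern below:
 */
#if 0	/* illustrative userspace code */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	p[0] = 'x';		/* first touch faults in a shmem page */
	munmap(p, 4096);
	return 0;
}
#endif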
4194/**
4195 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4196 * @mapping:	the page's address_space
4197 * @index:	the page index
4198 * @gfp:	the page allocator flags to use if allocating
4199 *
4200 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4201 * with any new page allocations done using the specified allocation flags.
4202 * But read_cache_page_gfp() uses the ->readpage() method: which does not
4203 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4204 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4205 *
4206 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4207 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4208 */
4209struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4210					 pgoff_t index, gfp_t gfp)
4211{
4212#ifdef CONFIG_SHMEM
4213	struct inode *inode = mapping->host;
4214	struct page *page;
4215	int error;
4216
4217	BUG_ON(mapping->a_ops != &shmem_aops);
4218	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4219				  gfp, NULL, NULL, NULL);
4220	if (error)
4221		page = ERR_PTR(error);
4222	else
4223		unlock_page(page);
4224	return page;
4225#else
4226	/*
4227	 * The tiny !SHMEM case uses ramfs without swap
4228	 */
4229	return read_cache_page_gfp(mapping, index, gfp);
4230#endif
4231}
4232EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
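/*
 * [Editorial sketch, not part of the original file] an in-kernel caller, in
 * the spirit of the i915 usage described above, might combine the mapping's
 * gfp mask with __GFP_NORETRY | __GFP_NOWARN:
 */
#if 0	/* illustrative in-kernel code */
static struct page *example_read_page(struct address_space *mapping,
				      pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif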
v6.8
   1/*
   2 * Resizable virtual memory filesystem for Linux.
   3 *
   4 * Copyright (C) 2000 Linus Torvalds.
   5 *		 2000 Transmeta Corp.
   6 *		 2000-2001 Christoph Rohland
   7 *		 2000-2001 SAP AG
   8 *		 2002 Red Hat Inc.
   9 * Copyright (C) 2002-2011 Hugh Dickins.
  10 * Copyright (C) 2011 Google Inc.
  11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
  12 * Copyright (C) 2004 Andi Kleen, SuSE Labs
  13 *
  14 * Extended attribute support for tmpfs:
  15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
  16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
  17 *
  18 * tiny-shmem:
  19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
  20 *
  21 * This file is released under the GPL.
  22 */
  23
  24#include <linux/fs.h>
  25#include <linux/init.h>
  26#include <linux/vfs.h>
  27#include <linux/mount.h>
  28#include <linux/ramfs.h>
  29#include <linux/pagemap.h>
  30#include <linux/file.h>
  31#include <linux/fileattr.h>
  32#include <linux/mm.h>
  33#include <linux/random.h>
  34#include <linux/sched/signal.h>
  35#include <linux/export.h>
  36#include <linux/shmem_fs.h>
  37#include <linux/swap.h>
  38#include <linux/uio.h>
  39#include <linux/hugetlb.h>
  40#include <linux/fs_parser.h>
  41#include <linux/swapfile.h>
  42#include <linux/iversion.h>
  43#include "swap.h"
  44
  45static struct vfsmount *shm_mnt __ro_after_init;
  46
  47#ifdef CONFIG_SHMEM
  48/*
  49 * This virtual memory filesystem is heavily based on the ramfs. It
  50 * extends ramfs by the ability to use swap and honor resource limits
  51 * which makes it a completely usable filesystem.
  52 */
  53
  54#include <linux/xattr.h>
  55#include <linux/exportfs.h>
  56#include <linux/posix_acl.h>
  57#include <linux/posix_acl_xattr.h>
  58#include <linux/mman.h>
  59#include <linux/string.h>
  60#include <linux/slab.h>
  61#include <linux/backing-dev.h>
  62#include <linux/writeback.h>
  63#include <linux/pagevec.h>
  64#include <linux/percpu_counter.h>
  65#include <linux/falloc.h>
  66#include <linux/splice.h>
  67#include <linux/security.h>
  68#include <linux/swapops.h>
  69#include <linux/mempolicy.h>
  70#include <linux/namei.h>
  71#include <linux/ctype.h>
  72#include <linux/migrate.h>
  73#include <linux/highmem.h>
  74#include <linux/seq_file.h>
  75#include <linux/magic.h>
  76#include <linux/syscalls.h>
  77#include <linux/fcntl.h>
  78#include <uapi/linux/memfd.h>
  79#include <linux/rmap.h>
  80#include <linux/uuid.h>
  81#include <linux/quotaops.h>
  82#include <linux/rcupdate_wait.h>
  83
  84#include <linux/uaccess.h>
  85
  86#include "internal.h"
  87
  88#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
  89#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
  90
  91/* Pretend that each entry is of this size in directory's i_size */
  92#define BOGO_DIRENT_SIZE 20
  93
  94/* Pretend that one inode + its dentry occupy this much memory */
  95#define BOGO_INODE_SIZE 1024
  96
  97/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
  98#define SHORT_SYMLINK_LEN 128
  99
 100/*
 101 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 102 * inode->i_private (with i_rwsem making sure that it has only one user at
 103 * a time): we would prefer not to enlarge the shmem inode just for that.
 104 */
 105struct shmem_falloc {
 106	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 107	pgoff_t start;		/* start of range currently being fallocated */
 108	pgoff_t next;		/* the next page offset to be fallocated */
 109	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
 110	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
 111};
 112
 113struct shmem_options {
 114	unsigned long long blocks;
 115	unsigned long long inodes;
 116	struct mempolicy *mpol;
 117	kuid_t uid;
 118	kgid_t gid;
 119	umode_t mode;
 120	bool full_inums;
 121	int huge;
 122	int seen;
 123	bool noswap;
 124	unsigned short quota_types;
 125	struct shmem_quota_limits qlimits;
 126#define SHMEM_SEEN_BLOCKS 1
 127#define SHMEM_SEEN_INODES 2
 128#define SHMEM_SEEN_HUGE 4
 129#define SHMEM_SEEN_INUMS 8
 130#define SHMEM_SEEN_NOSWAP 16
 131#define SHMEM_SEEN_QUOTA 32
 132};
 133
 134#ifdef CONFIG_TMPFS
 135static unsigned long shmem_default_max_blocks(void)
 136{
 137	return totalram_pages() / 2;
 138}
 139
 140static unsigned long shmem_default_max_inodes(void)
 141{
 142	unsigned long nr_pages = totalram_pages();
 143
 144	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
 145			ULONG_MAX / BOGO_INODE_SIZE);
 146}
 147#endif
 148
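/*
 * [Editorial worked example, not part of the original file] with 4KiB pages
 * on a 16GiB machine, totalram_pages() is 4Mi pages, so an unconfigured
 * tmpfs mount defaults to max_blocks = 2Mi pages, i.e. 8GiB of data.
 */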
 149static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 150			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
 151			struct mm_struct *fault_mm, vm_fault_t *fault_type);
 152
 153static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 154{
 155	return sb->s_fs_info;
 156}
 157
 158/*
 159 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 160 * for shared memory and for shared anonymous (/dev/zero) mappings
 161 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 162 * consistent with the pre-accounting of private mappings ...
 163 */
 164static inline int shmem_acct_size(unsigned long flags, loff_t size)
 165{
 166	return (flags & VM_NORESERVE) ?
 167		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
 168}
 169
 170static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 171{
 172	if (!(flags & VM_NORESERVE))
 173		vm_unacct_memory(VM_ACCT(size));
 174}
 175
 176static inline int shmem_reacct_size(unsigned long flags,
 177		loff_t oldsize, loff_t newsize)
 178{
 179	if (!(flags & VM_NORESERVE)) {
 180		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
 181			return security_vm_enough_memory_mm(current->mm,
 182					VM_ACCT(newsize) - VM_ACCT(oldsize));
 183		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
 184			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
 185	}
 186	return 0;
 187}
 188
 189/*
 190 * ... whereas tmpfs objects are accounted incrementally as
 191 * pages are allocated, in order to allow large sparse files.
 192 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
 193 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 194 */
 195static inline int shmem_acct_blocks(unsigned long flags, long pages)
 196{
 197	if (!(flags & VM_NORESERVE))
 198		return 0;
 199
 200	return security_vm_enough_memory_mm(current->mm,
 201			pages * VM_ACCT(PAGE_SIZE));
 202}
 203
 204static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 205{
 206	if (flags & VM_NORESERVE)
 207		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 208}
 209
 210static int shmem_inode_acct_blocks(struct inode *inode, long pages)
 211{
 212	struct shmem_inode_info *info = SHMEM_I(inode);
 213	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 214	int err = -ENOSPC;
 215
 216	if (shmem_acct_blocks(info->flags, pages))
 217		return err;
 218
 219	might_sleep();	/* when quotas */
 220	if (sbinfo->max_blocks) {
 221		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
 222						sbinfo->max_blocks, pages))
 223			goto unacct;
 224
 225		err = dquot_alloc_block_nodirty(inode, pages);
 226		if (err) {
 227			percpu_counter_sub(&sbinfo->used_blocks, pages);
 228			goto unacct;
 229		}
 230	} else {
 231		err = dquot_alloc_block_nodirty(inode, pages);
 232		if (err)
 233			goto unacct;
 234	}
 235
 236	return 0;
 237
 238unacct:
 239	shmem_unacct_blocks(info->flags, pages);
 240	return err;
 241}
 242
 243static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
 244{
 245	struct shmem_inode_info *info = SHMEM_I(inode);
 246	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 247
 248	might_sleep();	/* when quotas */
 249	dquot_free_block_nodirty(inode, pages);
 250
 251	if (sbinfo->max_blocks)
 252		percpu_counter_sub(&sbinfo->used_blocks, pages);
 253	shmem_unacct_blocks(info->flags, pages);
 254}
 255
 256static const struct super_operations shmem_ops;
 257const struct address_space_operations shmem_aops;
 258static const struct file_operations shmem_file_operations;
 259static const struct inode_operations shmem_inode_operations;
 260static const struct inode_operations shmem_dir_inode_operations;
 261static const struct inode_operations shmem_special_inode_operations;
 262static const struct vm_operations_struct shmem_vm_ops;
 263static const struct vm_operations_struct shmem_anon_vm_ops;
 264static struct file_system_type shmem_fs_type;
 265
 266bool vma_is_anon_shmem(struct vm_area_struct *vma)
 267{
 268	return vma->vm_ops == &shmem_anon_vm_ops;
 269}
 270
 271bool vma_is_shmem(struct vm_area_struct *vma)
 272{
 273	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
 274}
 275
 276static LIST_HEAD(shmem_swaplist);
 277static DEFINE_MUTEX(shmem_swaplist_mutex);
 278
 279#ifdef CONFIG_TMPFS_QUOTA
 280
 281static int shmem_enable_quotas(struct super_block *sb,
 282			       unsigned short quota_types)
 283{
 284	int type, err = 0;
 285
 286	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
 287	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
 288		if (!(quota_types & (1 << type)))
 289			continue;
 290		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
 291					  DQUOT_USAGE_ENABLED |
 292					  DQUOT_LIMITS_ENABLED);
 293		if (err)
 294			goto out_err;
 295	}
 296	return 0;
 297
 298out_err:
 299	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
 300		type, err);
 301	for (type--; type >= 0; type--)
 302		dquot_quota_off(sb, type);
 303	return err;
 304}
 305
 306static void shmem_disable_quotas(struct super_block *sb)
 307{
 308	int type;
 309
 310	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
 311		dquot_quota_off(sb, type);
 312}
 313
 314static struct dquot **shmem_get_dquots(struct inode *inode)
 315{
 316	return SHMEM_I(inode)->i_dquot;
 317}
 318#endif /* CONFIG_TMPFS_QUOTA */
 319
 320/*
 321 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 322 * produces a novel ino for the newly allocated inode.
 323 *
 324 * It may also be called when making a hard link to permit the space needed by
 325 * each dentry. However, in that case, no new inode number is needed since that
 326 * internally draws from another pool of inode numbers (currently global
 327 * get_next_ino()). This case is indicated by passing NULL as inop.
 328 */
 329#define SHMEM_INO_BATCH 1024
 330static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
 331{
 332	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 333	ino_t ino;
 334
 335	if (!(sb->s_flags & SB_KERNMOUNT)) {
 336		raw_spin_lock(&sbinfo->stat_lock);
 337		if (sbinfo->max_inodes) {
 338			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
 339				raw_spin_unlock(&sbinfo->stat_lock);
 340				return -ENOSPC;
 341			}
 342			sbinfo->free_ispace -= BOGO_INODE_SIZE;
 343		}
 344		if (inop) {
 345			ino = sbinfo->next_ino++;
 346			if (unlikely(is_zero_ino(ino)))
 347				ino = sbinfo->next_ino++;
 348			if (unlikely(!sbinfo->full_inums &&
 349				     ino > UINT_MAX)) {
 350				/*
 351				 * Emulate get_next_ino uint wraparound for
 352				 * compatibility
 353				 */
 354				if (IS_ENABLED(CONFIG_64BIT))
 355					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
 356						__func__, MINOR(sb->s_dev));
 357				sbinfo->next_ino = 1;
 358				ino = sbinfo->next_ino++;
 359			}
 360			*inop = ino;
 361		}
 362		raw_spin_unlock(&sbinfo->stat_lock);
 363	} else if (inop) {
 364		/*
 365		 * __shmem_file_setup, one of our callers, is lock-free: it
 366		 * doesn't hold stat_lock in shmem_reserve_inode since
 367		 * max_inodes is always 0, and is called from potentially
 368		 * unknown contexts. As such, use a per-cpu batched allocator
 369		 * which doesn't require the per-sb stat_lock unless we are at
 370		 * the batch boundary.
 371		 *
  372		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
  373		 * shmem mounts are not exposed to userspace, so there is no
  374		 * concern about things like glibc compatibility.
 375		 */
 376		ino_t *next_ino;
 377
 378		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
 379		ino = *next_ino;
 380		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
 381			raw_spin_lock(&sbinfo->stat_lock);
 382			ino = sbinfo->next_ino;
 383			sbinfo->next_ino += SHMEM_INO_BATCH;
 384			raw_spin_unlock(&sbinfo->stat_lock);
 385			if (unlikely(is_zero_ino(ino)))
 386				ino++;
 387		}
 388		*inop = ino;
 389		*next_ino = ++ino;
 390		put_cpu();
 391	}
 392
 393	return 0;
 394}
 395
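/*
 * [Editorial worked example, not part of the original file] with
 * SHMEM_INO_BATCH 1024: a CPU whose cursor *next_ino reaches a multiple of
 * 1024 refills under stat_lock, e.g. takes the global block [2048, 3071],
 * returns ino 2048 and leaves its cursor at 2049; the next 1023 allocations
 * on that CPU then proceed without touching stat_lock.
 */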
 396static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
 397{
 398	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 399	if (sbinfo->max_inodes) {
 400		raw_spin_lock(&sbinfo->stat_lock);
 401		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
 402		raw_spin_unlock(&sbinfo->stat_lock);
 403	}
 404}
 405
 406/**
 407 * shmem_recalc_inode - recalculate the block usage of an inode
 408 * @inode: inode to recalc
 409 * @alloced: the change in number of pages allocated to inode
 410 * @swapped: the change in number of pages swapped from inode
 411 *
 412 * We have to calculate the free blocks since the mm can drop
 413 * undirtied hole pages behind our back.
 414 *
 415 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 416 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 417 */
 418static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
 419{
 420	struct shmem_inode_info *info = SHMEM_I(inode);
 421	long freed;
 422
 423	spin_lock(&info->lock);
 424	info->alloced += alloced;
 425	info->swapped += swapped;
 426	freed = info->alloced - info->swapped -
 427		READ_ONCE(inode->i_mapping->nrpages);
 428	/*
 429	 * Special case: whereas normally shmem_recalc_inode() is called
 430	 * after i_mapping->nrpages has already been adjusted (up or down),
 431	 * shmem_writepage() has to raise swapped before nrpages is lowered -
 432	 * to stop a racing shmem_recalc_inode() from thinking that a page has
 433	 * been freed.  Compensate here, to avoid the need for a followup call.
 434	 */
 435	if (swapped > 0)
 436		freed += swapped;
 437	if (freed > 0)
 438		info->alloced -= freed;
 439	spin_unlock(&info->lock);
 440
 441	/* The quota case may block */
 442	if (freed > 0)
 443		shmem_inode_unacct_blocks(inode, freed);
 444}
 445
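/*
 * [Editorial worked example, not part of the original file] if an inode has
 * alloced = 100, swapped = 10 and nrpages has meanwhile dropped to 80, then
 * freed = 100 - 10 - 80 = 10: ten pages were reclaimed behind our back, so
 * alloced is trimmed to 90 and ten blocks are unaccounted above.
 */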
 446bool shmem_charge(struct inode *inode, long pages)
 447{
 448	struct address_space *mapping = inode->i_mapping;
 449
 450	if (shmem_inode_acct_blocks(inode, pages))
 451		return false;
 452
 453	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
 454	xa_lock_irq(&mapping->i_pages);
 455	mapping->nrpages += pages;
 456	xa_unlock_irq(&mapping->i_pages);
 457
 458	shmem_recalc_inode(inode, pages, 0);
 459	return true;
 460}
 461
 462void shmem_uncharge(struct inode *inode, long pages)
 463{
 464	/* pages argument is currently unused: keep it to help debugging */
 465	/* nrpages adjustment done by __filemap_remove_folio() or caller */
 466
 467	shmem_recalc_inode(inode, 0, 0);
 468}
 469
 470/*
 471 * Replace item expected in xarray by a new item, while holding xa_lock.
 472 */
 473static int shmem_replace_entry(struct address_space *mapping,
 474			pgoff_t index, void *expected, void *replacement)
 475{
 476	XA_STATE(xas, &mapping->i_pages, index);
 477	void *item;
 478
 479	VM_BUG_ON(!expected);
 480	VM_BUG_ON(!replacement);
 481	item = xas_load(&xas);
 482	if (item != expected)
 483		return -ENOENT;
 484	xas_store(&xas, replacement);
 485	return 0;
 486}
 487
 488/*
 489 * Sometimes, before we decide whether to proceed or to fail, we must check
 490 * that an entry was not already brought back from swap by a racing thread.
 491 *
  492 * Checking the page is not enough: by the time a SwapCache page is locked, it
 493 * might be reused, and again be SwapCache, using the same swap as before.
 494 */
 495static bool shmem_confirm_swap(struct address_space *mapping,
 496			       pgoff_t index, swp_entry_t swap)
 497{
 498	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
 499}
 500
 501/*
 502 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 503 *
 504 * SHMEM_HUGE_NEVER:
 505 *	disables huge pages for the mount;
 506 * SHMEM_HUGE_ALWAYS:
 507 *	enables huge pages for the mount;
 508 * SHMEM_HUGE_WITHIN_SIZE:
 509 *	only allocate huge pages if the page will be fully within i_size,
 510 *	also respect fadvise()/madvise() hints;
 511 * SHMEM_HUGE_ADVISE:
 512 *	only allocate huge pages if requested with fadvise()/madvise();
 513 */
 514
 515#define SHMEM_HUGE_NEVER	0
 516#define SHMEM_HUGE_ALWAYS	1
 517#define SHMEM_HUGE_WITHIN_SIZE	2
 518#define SHMEM_HUGE_ADVISE	3
 519
 520/*
 521 * Special values.
 522 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 523 *
 524 * SHMEM_HUGE_DENY:
 525 *	disables huge on shm_mnt and all mounts, for emergency use;
 526 * SHMEM_HUGE_FORCE:
 527 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 528 *
 529 */
 530#define SHMEM_HUGE_DENY		(-1)
 531#define SHMEM_HUGE_FORCE	(-2)
 532
 533#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 534/* ifdef here to avoid bloating shmem.o when not necessary */
 535
 536static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 537
 538bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
 539		   struct mm_struct *mm, unsigned long vm_flags)
 540{
 541	loff_t i_size;
 542
 543	if (!S_ISREG(inode->i_mode))
 544		return false;
 545	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
 546		return false;
 547	if (shmem_huge == SHMEM_HUGE_DENY)
 548		return false;
 549	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
 550		return true;
 551
 552	switch (SHMEM_SB(inode->i_sb)->huge) {
 553	case SHMEM_HUGE_ALWAYS:
 554		return true;
 555	case SHMEM_HUGE_WITHIN_SIZE:
 556		index = round_up(index + 1, HPAGE_PMD_NR);
 557		i_size = round_up(i_size_read(inode), PAGE_SIZE);
 558		if (i_size >> PAGE_SHIFT >= index)
 559			return true;
 560		fallthrough;
 561	case SHMEM_HUGE_ADVISE:
 562		if (mm && (vm_flags & VM_HUGEPAGE))
 563			return true;
 564		fallthrough;
 565	default:
 566		return false;
 567	}
 568}
 569
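/*
 * [Editorial sketch, not part of the original file] the SHMEM_HUGE_ADVISE
 * case above tests VM_HUGEPAGE, which userspace sets like this:
 */
#if 0	/* illustrative userspace code */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one PMD-sized chunk on x86-64 */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	if (madvise(p, len, MADV_HUGEPAGE) != 0)	/* sets VM_HUGEPAGE */
		perror("madvise");
	p[0] = 'x';
	return 0;
}
#endif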
 570#if defined(CONFIG_SYSFS)
 571static int shmem_parse_huge(const char *str)
 572{
 573	if (!strcmp(str, "never"))
 574		return SHMEM_HUGE_NEVER;
 575	if (!strcmp(str, "always"))
 576		return SHMEM_HUGE_ALWAYS;
 577	if (!strcmp(str, "within_size"))
 578		return SHMEM_HUGE_WITHIN_SIZE;
 579	if (!strcmp(str, "advise"))
 580		return SHMEM_HUGE_ADVISE;
 581	if (!strcmp(str, "deny"))
 582		return SHMEM_HUGE_DENY;
 583	if (!strcmp(str, "force"))
 584		return SHMEM_HUGE_FORCE;
 585	return -EINVAL;
 586}
 587#endif
 588
 589#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
 590static const char *shmem_format_huge(int huge)
 591{
 592	switch (huge) {
 593	case SHMEM_HUGE_NEVER:
 594		return "never";
 595	case SHMEM_HUGE_ALWAYS:
 596		return "always";
 597	case SHMEM_HUGE_WITHIN_SIZE:
 598		return "within_size";
 599	case SHMEM_HUGE_ADVISE:
 600		return "advise";
 601	case SHMEM_HUGE_DENY:
 602		return "deny";
 603	case SHMEM_HUGE_FORCE:
 604		return "force";
 605	default:
 606		VM_BUG_ON(1);
 607		return "bad_val";
 608	}
 609}
 610#endif
 611
 612static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 613		struct shrink_control *sc, unsigned long nr_to_split)
 614{
 615	LIST_HEAD(list), *pos, *next;
 616	LIST_HEAD(to_remove);
 617	struct inode *inode;
 618	struct shmem_inode_info *info;
 619	struct folio *folio;
 620	unsigned long batch = sc ? sc->nr_to_scan : 128;
 621	int split = 0;
 622
 623	if (list_empty(&sbinfo->shrinklist))
 624		return SHRINK_STOP;
 625
 626	spin_lock(&sbinfo->shrinklist_lock);
 627	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
 628		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 629
 630		/* pin the inode */
 631		inode = igrab(&info->vfs_inode);
 632
 633		/* inode is about to be evicted */
 634		if (!inode) {
 635			list_del_init(&info->shrinklist);
 636			goto next;
 637		}
 638
 639		/* Check if there's anything to gain */
 640		if (round_up(inode->i_size, PAGE_SIZE) ==
 641				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
 642			list_move(&info->shrinklist, &to_remove);
 643			goto next;
 644		}
 645
 646		list_move(&info->shrinklist, &list);
 647next:
 648		sbinfo->shrinklist_len--;
 649		if (!--batch)
 650			break;
 651	}
 652	spin_unlock(&sbinfo->shrinklist_lock);
 653
 654	list_for_each_safe(pos, next, &to_remove) {
 655		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 656		inode = &info->vfs_inode;
 657		list_del_init(&info->shrinklist);
 658		iput(inode);
 659	}
 660
 661	list_for_each_safe(pos, next, &list) {
 662		int ret;
 663		pgoff_t index;
 664
 665		info = list_entry(pos, struct shmem_inode_info, shrinklist);
 666		inode = &info->vfs_inode;
 667
 668		if (nr_to_split && split >= nr_to_split)
 669			goto move_back;
 670
 671		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
 672		folio = filemap_get_folio(inode->i_mapping, index);
 673		if (IS_ERR(folio))
 674			goto drop;
 675
 676		/* No huge page at the end of the file: nothing to split */
 677		if (!folio_test_large(folio)) {
 678			folio_put(folio);
 679			goto drop;
 680		}
 681
 682		/*
 683		 * Move the inode on the list back to shrinklist if we failed
 684		 * to lock the page at this time.
 685		 *
 686		 * Waiting for the lock may lead to deadlock in the
 687		 * reclaim path.
 688		 */
 689		if (!folio_trylock(folio)) {
 690			folio_put(folio);
 691			goto move_back;
 692		}
 693
 694		ret = split_folio(folio);
 695		folio_unlock(folio);
 696		folio_put(folio);
 697
  698		/* If split failed, move the inode on the list back to shrinklist */
 699		if (ret)
 700			goto move_back;
 701
 702		split++;
 703drop:
 704		list_del_init(&info->shrinklist);
 705		goto put;
 706move_back:
 707		/*
 708		 * Make sure the inode is either on the global list or deleted
 709		 * from any local list before iput() since it could be deleted
 710		 * in another thread once we put the inode (then the local list
 711		 * is corrupted).
 712		 */
 713		spin_lock(&sbinfo->shrinklist_lock);
 714		list_move(&info->shrinklist, &sbinfo->shrinklist);
 715		sbinfo->shrinklist_len++;
 716		spin_unlock(&sbinfo->shrinklist_lock);
 717put:
 718		iput(inode);
 719	}
 720
 721	return split;
 722}
 723
 724static long shmem_unused_huge_scan(struct super_block *sb,
 725		struct shrink_control *sc)
 726{
 727	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 728
 729	if (!READ_ONCE(sbinfo->shrinklist_len))
 730		return SHRINK_STOP;
 731
 732	return shmem_unused_huge_shrink(sbinfo, sc, 0);
 733}
 734
 735static long shmem_unused_huge_count(struct super_block *sb,
 736		struct shrink_control *sc)
 737{
 738	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 739	return READ_ONCE(sbinfo->shrinklist_len);
 740}
 741#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
 742
 743#define shmem_huge SHMEM_HUGE_DENY
 744
 745bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
 746		   struct mm_struct *mm, unsigned long vm_flags)
 747{
 748	return false;
 749}
 750
 751static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 752		struct shrink_control *sc, unsigned long nr_to_split)
 753{
 754	return 0;
 755}
 756#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 757
 758/*
 759 * Somewhat like filemap_add_folio, but error if expected item has gone.
 760 */
 761static int shmem_add_to_page_cache(struct folio *folio,
 762				   struct address_space *mapping,
 763				   pgoff_t index, void *expected, gfp_t gfp)
 764{
 765	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
 766	long nr = folio_nr_pages(folio);
 767
 768	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
 769	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 770	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
 771	VM_BUG_ON(expected && folio_test_large(folio));
 772
 773	folio_ref_add(folio, nr);
 774	folio->mapping = mapping;
 775	folio->index = index;
 776
 777	gfp &= GFP_RECLAIM_MASK;
 778	folio_throttle_swaprate(folio, gfp);
 779
 780	do {
 781		xas_lock_irq(&xas);
 782		if (expected != xas_find_conflict(&xas)) {
 783			xas_set_err(&xas, -EEXIST);
 784			goto unlock;
 785		}
 786		if (expected && xas_find_conflict(&xas)) {
 787			xas_set_err(&xas, -EEXIST);
 788			goto unlock;
 789		}
 790		xas_store(&xas, folio);
 791		if (xas_error(&xas))
 792			goto unlock;
 793		if (folio_test_pmd_mappable(folio))
 794			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
 795		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
 796		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
 797		mapping->nrpages += nr;
 798unlock:
 799		xas_unlock_irq(&xas);
 800	} while (xas_nomem(&xas, gfp));
 801
 802	if (xas_error(&xas)) {
 803		folio->mapping = NULL;
 804		folio_ref_sub(folio, nr);
 805		return xas_error(&xas);
 806	}
 807
 808	return 0;
 809}
 810
 811/*
 812 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
 813 */
 814static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
 815{
 816	struct address_space *mapping = folio->mapping;
 817	long nr = folio_nr_pages(folio);
 818	int error;
 819
 820	xa_lock_irq(&mapping->i_pages);
 821	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
 822	folio->mapping = NULL;
 823	mapping->nrpages -= nr;
 824	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 825	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
 826	xa_unlock_irq(&mapping->i_pages);
 827	folio_put(folio);
 828	BUG_ON(error);
 829}
 830
 831/*
 832 * Remove swap entry from page cache, free the swap and its page cache.
 833 */
 834static int shmem_free_swap(struct address_space *mapping,
 835			   pgoff_t index, void *radswap)
 836{
 837	void *old;
 838
 839	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
 840	if (old != radswap)
 841		return -ENOENT;
 842	free_swap_and_cache(radix_to_swp_entry(radswap));
 843	return 0;
 844}
 845
 846/*
 847 * Determine (in bytes) how many of the shmem object's pages mapped by the
 848 * given offsets are swapped out.
 849 *
 850 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 851 * as long as the inode doesn't go away and racy results are not a problem.
 852 */
 853unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 854						pgoff_t start, pgoff_t end)
 855{
 856	XA_STATE(xas, &mapping->i_pages, start);
 857	struct page *page;
 858	unsigned long swapped = 0;
 859	unsigned long max = end - 1;
 860
 861	rcu_read_lock();
 862	xas_for_each(&xas, page, max) {
 863		if (xas_retry(&xas, page))
 864			continue;
 865		if (xa_is_value(page))
 866			swapped++;
 867		if (xas.xa_index == max)
 868			break;
 869		if (need_resched()) {
 870			xas_pause(&xas);
 871			cond_resched_rcu();
 872		}
 873	}
 874	rcu_read_unlock();
 875
 876	return swapped << PAGE_SHIFT;
 877}
 878
 879/*
 880 * Determine (in bytes) how many of the shmem object's pages mapped by the
  881 * given vma are swapped out.
 882 *
 883 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 884 * as long as the inode doesn't go away and racy results are not a problem.
 885 */
 886unsigned long shmem_swap_usage(struct vm_area_struct *vma)
 887{
 888	struct inode *inode = file_inode(vma->vm_file);
 889	struct shmem_inode_info *info = SHMEM_I(inode);
 890	struct address_space *mapping = inode->i_mapping;
 891	unsigned long swapped;
 892
 893	/* Be careful as we don't hold info->lock */
 894	swapped = READ_ONCE(info->swapped);
 895
 896	/*
 897	 * The easier cases are when the shmem object has nothing in swap, or
 898	 * the vma maps it whole. Then we can simply use the stats that we
 899	 * already track.
 900	 */
 901	if (!swapped)
 902		return 0;
 903
 904	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
 905		return swapped << PAGE_SHIFT;
 906
 907	/* Here comes the more involved part */
 908	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
 909					vma->vm_pgoff + vma_pages(vma));
 910}
 911
 912/*
 913 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 914 */
 915void shmem_unlock_mapping(struct address_space *mapping)
 916{
 917	struct folio_batch fbatch;
 918	pgoff_t index = 0;
 919
 920	folio_batch_init(&fbatch);
 921	/*
 922	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
 923	 */
 924	while (!mapping_unevictable(mapping) &&
 925	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
 926		check_move_unevictable_folios(&fbatch);
 927		folio_batch_release(&fbatch);
 928		cond_resched();
 929	}
 930}
 931
 932static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
 933{
 934	struct folio *folio;
 935
 936	/*
 937	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
 938	 * beyond i_size, and reports fallocated folios as holes.
 939	 */
 940	folio = filemap_get_entry(inode->i_mapping, index);
 941	if (!folio)
 942		return folio;
 943	if (!xa_is_value(folio)) {
 944		folio_lock(folio);
 945		if (folio->mapping == inode->i_mapping)
 946			return folio;
 947		/* The folio has been swapped out */
 948		folio_unlock(folio);
 949		folio_put(folio);
 950	}
 951	/*
 952	 * But read a folio back from swap if any of it is within i_size
 953	 * (although in some cases this is just a waste of time).
 954	 */
 955	folio = NULL;
 956	shmem_get_folio(inode, index, &folio, SGP_READ);
 957	return folio;
 958}
 959
 960/*
 961 * Remove range of pages and swap entries from page cache, and free them.
 962 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 963 */
 964static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 965								 bool unfalloc)
 966{
 967	struct address_space *mapping = inode->i_mapping;
 968	struct shmem_inode_info *info = SHMEM_I(inode);
 969	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 970	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
 971	struct folio_batch fbatch;
 972	pgoff_t indices[PAGEVEC_SIZE];
 973	struct folio *folio;
 974	bool same_folio;
 975	long nr_swaps_freed = 0;
 976	pgoff_t index;
 977	int i;
 978
 979	if (lend == -1)
 980		end = -1;	/* unsigned, so actually very big */
 981
 982	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
 983		info->fallocend = start;
 984
 985	folio_batch_init(&fbatch);
 986	index = start;
 987	while (index < end && find_lock_entries(mapping, &index, end - 1,
 988			&fbatch, indices)) {
 989		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 990			folio = fbatch.folios[i];
 991
 992			if (xa_is_value(folio)) {
 993				if (unfalloc)
 994					continue;
 995				nr_swaps_freed += !shmem_free_swap(mapping,
 996							indices[i], folio);
 997				continue;
 998			}
 999
1000			if (!unfalloc || !folio_test_uptodate(folio))
1001				truncate_inode_folio(mapping, folio);
1002			folio_unlock(folio);
1003		}
1004		folio_batch_remove_exceptionals(&fbatch);
1005		folio_batch_release(&fbatch);
1006		cond_resched();
1007	}
1008
1009	/*
1010	 * When undoing a failed fallocate, we want none of the partial folio
1011	 * zeroing and splitting below, but shall want to truncate the whole
1012	 * folio when !uptodate indicates that it was added by this fallocate,
1013	 * even when [lstart, lend] covers only a part of the folio.
1014	 */
1015	if (unfalloc)
1016		goto whole_folios;
1017
1018	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1019	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1020	if (folio) {
1021		same_folio = lend < folio_pos(folio) + folio_size(folio);
1022		folio_mark_dirty(folio);
1023		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1024			start = folio_next_index(folio);
1025			if (same_folio)
1026				end = folio->index;
1027		}
1028		folio_unlock(folio);
1029		folio_put(folio);
1030		folio = NULL;
1031	}
1032
1033	if (!same_folio)
1034		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1035	if (folio) {
1036		folio_mark_dirty(folio);
1037		if (!truncate_inode_partial_folio(folio, lstart, lend))
1038			end = folio->index;
1039		folio_unlock(folio);
1040		folio_put(folio);
1041	}
1042
1043whole_folios:
1044
1045	index = start;
1046	while (index < end) {
1047		cond_resched();
1048
1049		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1050				indices)) {
1051			/* If all gone or hole-punch or unfalloc, we're done */
1052			if (index == start || end != -1)
1053				break;
1054			/* But if truncating, restart to make sure all gone */
1055			index = start;
1056			continue;
1057		}
1058		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1059			folio = fbatch.folios[i];
1060
1061			if (xa_is_value(folio)) {
1062				if (unfalloc)
1063					continue;
1064				if (shmem_free_swap(mapping, indices[i], folio)) {
1065					/* Swap was replaced by page: retry */
1066					index = indices[i];
1067					break;
1068				}
1069				nr_swaps_freed++;
1070				continue;
1071			}
1072
1073			folio_lock(folio);
1074
1075			if (!unfalloc || !folio_test_uptodate(folio)) {
1076				if (folio_mapping(folio) != mapping) {
1077					/* Page was replaced by swap: retry */
1078					folio_unlock(folio);
1079					index = indices[i];
1080					break;
1081				}
1082				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1083						folio);
1084
1085				if (!folio_test_large(folio)) {
1086					truncate_inode_folio(mapping, folio);
1087				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1088					/*
1089					 * If we split a page, reset the loop so
1090					 * that we pick up the new sub pages.
1091					 * Otherwise the THP was entirely
1092					 * dropped or the target range was
1093					 * zeroed, so just continue the loop as
1094					 * is.
1095					 */
1096					if (!folio_test_large(folio)) {
1097						folio_unlock(folio);
1098						index = start;
1099						break;
1100					}
1101				}
1102			}
1103			folio_unlock(folio);
1104		}
1105		folio_batch_remove_exceptionals(&fbatch);
1106		folio_batch_release(&fbatch);
1107	}
1108
1109	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1110}
1111
1112void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1113{
1114	shmem_undo_range(inode, lstart, lend, false);
1115	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1116	inode_inc_iversion(inode);
1117}
1118EXPORT_SYMBOL_GPL(shmem_truncate_range);
1119
1120static int shmem_getattr(struct mnt_idmap *idmap,
1121			 const struct path *path, struct kstat *stat,
1122			 u32 request_mask, unsigned int query_flags)
1123{
1124	struct inode *inode = path->dentry->d_inode;
1125	struct shmem_inode_info *info = SHMEM_I(inode);
1126
1127	if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1128		shmem_recalc_inode(inode, 0, 0);
1129
1130	if (info->fsflags & FS_APPEND_FL)
1131		stat->attributes |= STATX_ATTR_APPEND;
1132	if (info->fsflags & FS_IMMUTABLE_FL)
1133		stat->attributes |= STATX_ATTR_IMMUTABLE;
1134	if (info->fsflags & FS_NODUMP_FL)
1135		stat->attributes |= STATX_ATTR_NODUMP;
1136	stat->attributes_mask |= (STATX_ATTR_APPEND |
1137			STATX_ATTR_IMMUTABLE |
1138			STATX_ATTR_NODUMP);
1139	generic_fillattr(idmap, request_mask, inode, stat);
1140
1141	if (shmem_is_huge(inode, 0, false, NULL, 0))
1142		stat->blksize = HPAGE_PMD_SIZE;
1143
1144	if (request_mask & STATX_BTIME) {
1145		stat->result_mask |= STATX_BTIME;
1146		stat->btime.tv_sec = info->i_crtime.tv_sec;
1147		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1148	}
1149
1150	return 0;
1151}
1152
1153static int shmem_setattr(struct mnt_idmap *idmap,
1154			 struct dentry *dentry, struct iattr *attr)
1155{
1156	struct inode *inode = d_inode(dentry);
1157	struct shmem_inode_info *info = SHMEM_I(inode);
1158	int error;
1159	bool update_mtime = false;
1160	bool update_ctime = true;
1161
1162	error = setattr_prepare(idmap, dentry, attr);
1163	if (error)
1164		return error;
1165
1166	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1167		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1168			return -EPERM;
1169		}
1170	}
1171
1172	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1173		loff_t oldsize = inode->i_size;
1174		loff_t newsize = attr->ia_size;
1175
1176		/* protected by i_rwsem */
1177		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1178		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1179			return -EPERM;
1180
1181		if (newsize != oldsize) {
1182			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1183					oldsize, newsize);
1184			if (error)
1185				return error;
1186			i_size_write(inode, newsize);
1187			update_mtime = true;
1188		} else {
1189			update_ctime = false;
1190		}
1191		if (newsize <= oldsize) {
1192			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1193			if (oldsize > holebegin)
1194				unmap_mapping_range(inode->i_mapping,
1195							holebegin, 0, 1);
1196			if (info->alloced)
1197				shmem_truncate_range(inode,
1198							newsize, (loff_t)-1);
1199			/* unmap again to remove racily COWed private pages */
1200			if (oldsize > holebegin)
1201				unmap_mapping_range(inode->i_mapping,
1202							holebegin, 0, 1);
1203		}
1204	}
1205
1206	if (is_quota_modification(idmap, inode, attr)) {
1207		error = dquot_initialize(inode);
1208		if (error)
1209			return error;
1210	}
1211
1212	/* Transfer quota accounting */
1213	if (i_uid_needs_update(idmap, attr, inode) ||
1214	    i_gid_needs_update(idmap, attr, inode)) {
1215		error = dquot_transfer(idmap, inode, attr);
1216		if (error)
1217			return error;
1218	}
1219
1220	setattr_copy(idmap, inode, attr);
1221	if (attr->ia_valid & ATTR_MODE)
1222		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1223	if (!error && update_ctime) {
1224		inode_set_ctime_current(inode);
1225		if (update_mtime)
1226			inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1227		inode_inc_iversion(inode);
1228	}
1229	return error;
1230}
1231
1232static void shmem_evict_inode(struct inode *inode)
1233{
1234	struct shmem_inode_info *info = SHMEM_I(inode);
1235	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1236	size_t freed = 0;
1237
1238	if (shmem_mapping(inode->i_mapping)) {
1239		shmem_unacct_size(info->flags, inode->i_size);
1240		inode->i_size = 0;
1241		mapping_set_exiting(inode->i_mapping);
1242		shmem_truncate_range(inode, 0, (loff_t)-1);
1243		if (!list_empty(&info->shrinklist)) {
1244			spin_lock(&sbinfo->shrinklist_lock);
1245			if (!list_empty(&info->shrinklist)) {
1246				list_del_init(&info->shrinklist);
1247				sbinfo->shrinklist_len--;
1248			}
1249			spin_unlock(&sbinfo->shrinklist_lock);
1250		}
1251		while (!list_empty(&info->swaplist)) {
1252			/* Wait while shmem_unuse() is scanning this inode... */
1253			wait_var_event(&info->stop_eviction,
1254				       !atomic_read(&info->stop_eviction));
1255			mutex_lock(&shmem_swaplist_mutex);
1256			/* ...but beware of the race if we peeked too early */
1257			if (!atomic_read(&info->stop_eviction))
1258				list_del_init(&info->swaplist);
1259			mutex_unlock(&shmem_swaplist_mutex);
1260		}
1261	}
1262
1263	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1264	shmem_free_inode(inode->i_sb, freed);
1265	WARN_ON(inode->i_blocks);
1266	clear_inode(inode);
1267#ifdef CONFIG_TMPFS_QUOTA
1268	dquot_free_inode(inode);
1269	dquot_drop(inode);
1270#endif
1271}
1272
1273static int shmem_find_swap_entries(struct address_space *mapping,
1274				   pgoff_t start, struct folio_batch *fbatch,
1275				   pgoff_t *indices, unsigned int type)
1276{
1277	XA_STATE(xas, &mapping->i_pages, start);
1278	struct folio *folio;
1279	swp_entry_t entry;
1280
1281	rcu_read_lock();
1282	xas_for_each(&xas, folio, ULONG_MAX) {
1283		if (xas_retry(&xas, folio))
1284			continue;
1285
1286		if (!xa_is_value(folio))
1287			continue;
1288
1289		entry = radix_to_swp_entry(folio);
1290		/*
1291		 * swapin error entries can be found in the mapping. But they're
1292		 * deliberately ignored here as we've done everything we can do.
1293		 */
1294		if (swp_type(entry) != type)
1295			continue;
1296
1297		indices[folio_batch_count(fbatch)] = xas.xa_index;
1298		if (!folio_batch_add(fbatch, folio))
1299			break;
1300
1301		if (need_resched()) {
1302			xas_pause(&xas);
1303			cond_resched_rcu();
1304		}
1305	}
1306	rcu_read_unlock();
1307
1308	return xas.xa_index;
1309}
1310
1311/*
1312 * Move the swapped pages for an inode to page cache. Returns the count
1313 * of pages swapped in, or the error in case of failure.
1314 */
1315static int shmem_unuse_swap_entries(struct inode *inode,
1316		struct folio_batch *fbatch, pgoff_t *indices)
1317{
1318	int i = 0;
1319	int ret = 0;
1320	int error = 0;
1321	struct address_space *mapping = inode->i_mapping;
1322
1323	for (i = 0; i < folio_batch_count(fbatch); i++) {
1324		struct folio *folio = fbatch->folios[i];
1325
1326		if (!xa_is_value(folio))
1327			continue;
1328		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1329					mapping_gfp_mask(mapping), NULL, NULL);
1330		if (error == 0) {
1331			folio_unlock(folio);
1332			folio_put(folio);
1333			ret++;
1334		}
1335		if (error == -ENOMEM)
1336			break;
1337		error = 0;
1338	}
1339	return error ? error : ret;
1340}
1341
1342/*
1343 * If swap found in inode, free it and move page from swapcache to filecache.
1344 */
1345static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1346{
1347	struct address_space *mapping = inode->i_mapping;
1348	pgoff_t start = 0;
1349	struct folio_batch fbatch;
1350	pgoff_t indices[PAGEVEC_SIZE];
1351	int ret = 0;
1352
1353	do {
1354		folio_batch_init(&fbatch);
1355		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1356		if (folio_batch_count(&fbatch) == 0) {
1357			ret = 0;
1358			break;
1359		}
1360
1361		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1362		if (ret < 0)
1363			break;
1364
1365		start = indices[folio_batch_count(&fbatch) - 1];
1366	} while (true);
1367
1368	return ret;
1369}
1370
1371/*
1372 * Read all the shared memory data that resides in the swap
1373 * device 'type' back into memory, so the swap device can be
1374 * unused.
1375 */
1376int shmem_unuse(unsigned int type)
1377{
1378	struct shmem_inode_info *info, *next;
1379	int error = 0;
1380
1381	if (list_empty(&shmem_swaplist))
1382		return 0;
1383
1384	mutex_lock(&shmem_swaplist_mutex);
1385	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1386		if (!info->swapped) {
1387			list_del_init(&info->swaplist);
1388			continue;
1389		}
1390		/*
1391		 * Drop the swaplist mutex while searching the inode for swap;
1392		 * but before doing so, make sure shmem_evict_inode() will not
1393		 * remove placeholder inode from swaplist, nor let it be freed
1394		 * (igrab() would protect from unlink, but not from unmount).
1395		 */
1396		atomic_inc(&info->stop_eviction);
1397		mutex_unlock(&shmem_swaplist_mutex);
1398
1399		error = shmem_unuse_inode(&info->vfs_inode, type);
1400		cond_resched();
1401
1402		mutex_lock(&shmem_swaplist_mutex);
1403		next = list_next_entry(info, swaplist);
1404		if (!info->swapped)
1405			list_del_init(&info->swaplist);
1406		if (atomic_dec_and_test(&info->stop_eviction))
1407			wake_up_var(&info->stop_eviction);
1408		if (error)
1409			break;
1410	}
1411	mutex_unlock(&shmem_swaplist_mutex);
1412
1413	return error;
1414}
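/*
 * Illustrative call sequence (a sketch, not part of this file): swapoff(2)
 * reaches shmem_unuse() from try_to_unuse() in mm/swapfile.c, roughly:
 *
 *	error = shmem_unuse(type);	// type: the swap device being disabled
 *	if (error)
 *		// abort the swapoff; some tmpfs pages stayed on swap
 *
 * Anonymous memory is handled separately by unuse_mm(); tmpfs drains all
 * of its swapped inodes for the device in this one pass.
 */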
1415
1416/*
1417 * Move the page from the page cache to the swap cache.
1418 */
1419static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1420{
1421	struct folio *folio = page_folio(page);
1422	struct address_space *mapping = folio->mapping;
1423	struct inode *inode = mapping->host;
1424	struct shmem_inode_info *info = SHMEM_I(inode);
1425	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1426	swp_entry_t swap;
1427	pgoff_t index;
1428
1429	/*
1430	 * Our capabilities prevent regular writeback or sync from ever calling
1431	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1432	 * its underlying filesystem, in which case tmpfs should write out to
1433	 * swap only in response to memory pressure, and not for the writeback
1434	 * threads or sync.
1435	 */
1436	if (WARN_ON_ONCE(!wbc->for_reclaim))
1437		goto redirty;
1438
1439	if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
1440		goto redirty;
1441
1442	if (!total_swap_pages)
1443		goto redirty;
1444
1445	/*
1446	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
1447	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
1448	 * and its shmem_writeback() needs them to be split when swapping.
1449	 */
1450	if (folio_test_large(folio)) {
1451		/* Ensure the subpages are still dirty */
1452		folio_test_set_dirty(folio);
1453		if (split_huge_page(page) < 0)
1454			goto redirty;
1455		folio = page_folio(page);
1456		folio_clear_dirty(folio);
1457	}
1458
1459	index = folio->index;
1460
1461	/*
1462	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1463	 * value into swapfile.c, the only way we can correctly account for a
1464	 * fallocated folio arriving here is now to initialize it and write it.
1465	 *
1466	 * That's okay for a folio already fallocated earlier, but if we have
1467	 * not yet completed the fallocation, then (a) we want to keep track
1468	 * of this folio in case we have to undo it, and (b) it may not be a
1469	 * good idea to continue anyway, once we're pushing into swap.  So
1470	 * reactivate the folio, and let shmem_fallocate() quit when too many.
1471	 */
1472	if (!folio_test_uptodate(folio)) {
1473		if (inode->i_private) {
1474			struct shmem_falloc *shmem_falloc;
1475			spin_lock(&inode->i_lock);
1476			shmem_falloc = inode->i_private;
1477			if (shmem_falloc &&
1478			    !shmem_falloc->waitq &&
1479			    index >= shmem_falloc->start &&
1480			    index < shmem_falloc->next)
1481				shmem_falloc->nr_unswapped++;
1482			else
1483				shmem_falloc = NULL;
1484			spin_unlock(&inode->i_lock);
1485			if (shmem_falloc)
1486				goto redirty;
1487		}
1488		folio_zero_range(folio, 0, folio_size(folio));
1489		flush_dcache_folio(folio);
1490		folio_mark_uptodate(folio);
1491	}
1492
1493	swap = folio_alloc_swap(folio);
1494	if (!swap.val)
1495		goto redirty;
1496
1497	/*
1498	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1499	 * if it's not already there.  Do it now before the folio is
1500	 * moved to swap cache, when its pagelock no longer protects
1501	 * the inode from eviction.  But don't unlock the mutex until
1502	 * we've incremented swapped, because shmem_unuse_inode() will
1503	 * prune a !swapped inode from the swaplist under this mutex.
1504	 */
1505	mutex_lock(&shmem_swaplist_mutex);
1506	if (list_empty(&info->swaplist))
1507		list_add(&info->swaplist, &shmem_swaplist);
1508
1509	if (add_to_swap_cache(folio, swap,
1510			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1511			NULL) == 0) {
1512		shmem_recalc_inode(inode, 0, 1);
1513		swap_shmem_alloc(swap);
1514		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1515
1516		mutex_unlock(&shmem_swaplist_mutex);
1517		BUG_ON(folio_mapped(folio));
1518		return swap_writepage(&folio->page, wbc);
1519	}
1520
1521	mutex_unlock(&shmem_swaplist_mutex);
1522	put_swap_folio(folio, swap);
1523redirty:
1524	folio_mark_dirty(folio);
1525	if (wbc->for_reclaim)
1526		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
1527	folio_unlock(folio);
1528	return 0;
1529}
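/*
 * Sketch of the contract with vmscan (illustrative, summarizing the paths
 * above): pageout() calls ->writepage with wbc->for_reclaim set. A return
 * of 0 means the write to swap was started and the folio was unlocked by
 * swap_writepage(); AOP_WRITEPAGE_ACTIVATE means "could not swap this
 * one", with the folio left locked and dirty so that reclaim reactivates
 * it rather than retrying it again and again under pressure.
 */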
1530
1531#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1532static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1533{
1534	char buffer[64];
1535
1536	if (!mpol || mpol->mode == MPOL_DEFAULT)
1537		return;		/* show nothing */
1538
1539	mpol_to_str(buffer, sizeof(buffer), mpol);
1540
1541	seq_printf(seq, ",mpol=%s", buffer);
1542}
1543
1544static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1545{
1546	struct mempolicy *mpol = NULL;
1547	if (sbinfo->mpol) {
1548		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1549		mpol = sbinfo->mpol;
1550		mpol_get(mpol);
1551		raw_spin_unlock(&sbinfo->stat_lock);
1552	}
1553	return mpol;
1554}
1555#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1556static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1557{
1558}
1559static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1560{
1561	return NULL;
1562}
1563#endif /* CONFIG_NUMA && CONFIG_TMPFS */
1564
1565static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1566			pgoff_t index, unsigned int order, pgoff_t *ilx);
1567
1568static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1569			struct shmem_inode_info *info, pgoff_t index)
1570{
1571	struct mempolicy *mpol;
1572	pgoff_t ilx;
1573	struct folio *folio;
1574
1575	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1576	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1577	mpol_cond_put(mpol);
1578
1579	return folio;
1580}
1581
1582/*
1583 * Make sure huge_gfp is always more limited than limit_gfp.
1584 * Some of the flags set permissions, while others set limitations.
1585 */
1586static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1587{
1588	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1589	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1590	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1591	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1592
1593	/* Allow allocations only from the originally specified zones. */
1594	result |= zoneflags;
1595
1596	/*
1597	 * Minimize the result gfp by taking the union with the deny flags,
1598	 * and the intersection of the allow flags.
1599	 */
1600	result |= (limit_gfp & denyflags);
1601	result |= (huge_gfp & limit_gfp) & allowflags;
1602
1603	return result;
1604}
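/*
 * Worked example (illustrative values only):
 *
 *	gfp_t huge  = GFP_TRANSHUGE;			// allows direct reclaim
 *	gfp_t limit = GFP_NOWAIT | __GFP_NOWARN;	// caller forbids it
 *	gfp_t eff   = limit_gfp_mask(huge, limit);
 *
 * eff takes its zone bits from limit, inherits __GFP_NOWARN (a deny flag)
 * from limit, and keeps the IO/FS/reclaim allow flags only where both
 * masks agree: the huge allocation can never block harder than the
 * caller's original mask permitted.
 */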
1605
1606static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1607		struct shmem_inode_info *info, pgoff_t index)
1608{
1609	struct mempolicy *mpol;
1610	pgoff_t ilx;
1611	struct page *page;
1612
1613	mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
1614	page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
1615	mpol_cond_put(mpol);
1616
1617	return page_rmappable_folio(page);
1618}
1619
1620static struct folio *shmem_alloc_folio(gfp_t gfp,
1621		struct shmem_inode_info *info, pgoff_t index)
1622{
1623	struct mempolicy *mpol;
1624	pgoff_t ilx;
1625	struct page *page;
1626
1627	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1628	page = alloc_pages_mpol(gfp, 0, mpol, ilx, numa_node_id());
1629	mpol_cond_put(mpol);
1630
1631	return (struct folio *)page;
1632}
1633
1634static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
1635		struct inode *inode, pgoff_t index,
1636		struct mm_struct *fault_mm, bool huge)
1637{
1638	struct address_space *mapping = inode->i_mapping;
1639	struct shmem_inode_info *info = SHMEM_I(inode);
1640	struct folio *folio;
1641	long pages;
1642	int error;
1643
1644	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1645		huge = false;
1646
1647	if (huge) {
1648		pages = HPAGE_PMD_NR;
1649		index = round_down(index, HPAGE_PMD_NR);
1650
1651		/*
1652		 * Check for conflict before waiting on a huge allocation.
1653		 * Conflict might be that a huge page has just been allocated
1654		 * and added to page cache by a racing thread, or that there
1655		 * is already at least one small page in the huge extent.
1656		 * Be careful to retry when appropriate, but not forever!
1657		 * Elsewhere -EEXIST would be the right code, but not here.
1658		 */
1659		if (xa_find(&mapping->i_pages, &index,
1660				index + HPAGE_PMD_NR - 1, XA_PRESENT))
1661			return ERR_PTR(-E2BIG);
1662
1663		folio = shmem_alloc_hugefolio(gfp, info, index);
1664		if (!folio)
1665			count_vm_event(THP_FILE_FALLBACK);
1666	} else {
1667		pages = 1;
1668		folio = shmem_alloc_folio(gfp, info, index);
1669	}
1670	if (!folio)
1671		return ERR_PTR(-ENOMEM);
1672
1673	__folio_set_locked(folio);
1674	__folio_set_swapbacked(folio);
1675
1676	gfp &= GFP_RECLAIM_MASK;
1677	error = mem_cgroup_charge(folio, fault_mm, gfp);
1678	if (error) {
1679		if (xa_find(&mapping->i_pages, &index,
1680				index + pages - 1, XA_PRESENT)) {
1681			error = -EEXIST;
1682		} else if (huge) {
1683			count_vm_event(THP_FILE_FALLBACK);
1684			count_vm_event(THP_FILE_FALLBACK_CHARGE);
1685		}
1686		goto unlock;
1687	}
1688
1689	error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1690	if (error)
1691		goto unlock;
1692
1693	error = shmem_inode_acct_blocks(inode, pages);
1694	if (error) {
1695		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1696		long freed;
1697		/*
1698		 * Try to reclaim some space by splitting a few
1699		 * large folios beyond i_size on the filesystem.
1700		 */
1701		shmem_unused_huge_shrink(sbinfo, NULL, 2);
1702		/*
1703		 * And do a shmem_recalc_inode() to account for freed pages:
1704		 * except our folio is there in cache, so not quite balanced.
1705		 */
1706		spin_lock(&info->lock);
1707		freed = pages + info->alloced - info->swapped -
1708			READ_ONCE(mapping->nrpages);
1709		if (freed > 0)
1710			info->alloced -= freed;
1711		spin_unlock(&info->lock);
1712		if (freed > 0)
1713			shmem_inode_unacct_blocks(inode, freed);
1714		error = shmem_inode_acct_blocks(inode, pages);
1715		if (error) {
1716			filemap_remove_folio(folio);
1717			goto unlock;
1718		}
1719	}
1720
1721	shmem_recalc_inode(inode, pages, 0);
1722	folio_add_lru(folio);
1723	return folio;
1724
1725unlock:
1726	folio_unlock(folio);
1727	folio_put(folio);
1728	return ERR_PTR(error);
1729}
1730
1731/*
1732 * When a page is moved from swapcache to shmem filecache (either by the
1733 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1734 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1735 * ignorance of the mapping it belongs to.  If that mapping has special
1736 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1737 * we may need to copy to a suitable page before moving to filecache.
1738 *
1739 * In a future release, this may well be extended to respect cpuset and
1740 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1741 * but for now it is a simple matter of zone.
1742 */
1743static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1744{
1745	return folio_zonenum(folio) > gfp_zone(gfp);
1746}
1747
1748static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1749				struct shmem_inode_info *info, pgoff_t index)
1750{
1751	struct folio *old, *new;
1752	struct address_space *swap_mapping;
1753	swp_entry_t entry;
1754	pgoff_t swap_index;
1755	int error;
1756
1757	old = *foliop;
1758	entry = old->swap;
1759	swap_index = swp_offset(entry);
1760	swap_mapping = swap_address_space(entry);
1761
1762	/*
1763	 * We have arrived here because our zones are constrained, so don't
1764	 * limit chance of success by further cpuset and node constraints.
1765	 */
1766	gfp &= ~GFP_CONSTRAINT_MASK;
1767	VM_BUG_ON_FOLIO(folio_test_large(old), old);
1768	new = shmem_alloc_folio(gfp, info, index);
1769	if (!new)
1770		return -ENOMEM;
1771
1772	folio_get(new);
1773	folio_copy(new, old);
1774	flush_dcache_folio(new);
1775
1776	__folio_set_locked(new);
1777	__folio_set_swapbacked(new);
1778	folio_mark_uptodate(new);
1779	new->swap = entry;
1780	folio_set_swapcache(new);
1781
1782	/*
1783	 * Our caller will very soon move newpage out of swapcache, but it's
1784	 * a nice clean interface for us to replace oldpage by newpage there.
1785	 */
1786	xa_lock_irq(&swap_mapping->i_pages);
1787	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
1788	if (!error) {
1789		mem_cgroup_migrate(old, new);
1790		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1791		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1792		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1793		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
1794	}
1795	xa_unlock_irq(&swap_mapping->i_pages);
1796
1797	if (unlikely(error)) {
1798		/*
1799		 * Is this possible?  I think not, now that our callers check
1800		 * both PageSwapCache and page_private after getting page lock;
1801		 * but be defensive.  Reverse old to newpage for clear and free.
1802		 */
1803		old = new;
1804	} else {
1805		folio_add_lru(new);
1806		*foliop = new;
1807	}
1808
1809	folio_clear_swapcache(old);
1810	old->private = NULL;
1811
1812	folio_unlock(old);
1813	folio_put_refs(old, 2);
1814	return error;
1815}
1816
1817static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1818					 struct folio *folio, swp_entry_t swap)
1819{
1820	struct address_space *mapping = inode->i_mapping;
1821	swp_entry_t swapin_error;
1822	void *old;
1823
1824	swapin_error = make_poisoned_swp_entry();
1825	old = xa_cmpxchg_irq(&mapping->i_pages, index,
1826			     swp_to_radix_entry(swap),
1827			     swp_to_radix_entry(swapin_error), 0);
1828	if (old != swp_to_radix_entry(swap))
1829		return;
1830
1831	folio_wait_writeback(folio);
1832	delete_from_swap_cache(folio);
1833	/*
1834	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
1835	 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
1836	 * in shmem_evict_inode().
1837	 */
1838	shmem_recalc_inode(inode, -1, -1);
1839	swap_free(swap);
1840}
1841
1842/*
1843 * Swap in the folio pointed to by *foliop.
1844 * Caller has to make sure that *foliop contains a valid swapped folio.
1845 * Returns 0 and the folio in foliop if success. On failure, returns the
1846 * error code and NULL in *foliop.
1847 */
1848static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1849			     struct folio **foliop, enum sgp_type sgp,
1850			     gfp_t gfp, struct mm_struct *fault_mm,
1851			     vm_fault_t *fault_type)
1852{
1853	struct address_space *mapping = inode->i_mapping;
1854	struct shmem_inode_info *info = SHMEM_I(inode);
1855	struct swap_info_struct *si;
1856	struct folio *folio = NULL;
1857	swp_entry_t swap;
1858	int error;
1859
1860	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1861	swap = radix_to_swp_entry(*foliop);
1862	*foliop = NULL;
1863
1864	if (is_poisoned_swp_entry(swap))
1865		return -EIO;
1866
1867	si = get_swap_device(swap);
1868	if (!si) {
1869		if (!shmem_confirm_swap(mapping, index, swap))
1870			return -EEXIST;
1871		else
1872			return -EINVAL;
1873	}
1874
1875	/* Look it up and read it in... */
1876	folio = swap_cache_get_folio(swap, NULL, 0);
1877	if (!folio) {
1878		/* Or update major stats only when swapin succeeds? */
1879		if (fault_type) {
1880			*fault_type |= VM_FAULT_MAJOR;
1881			count_vm_event(PGMAJFAULT);
1882			count_memcg_event_mm(fault_mm, PGMAJFAULT);
1883		}
1884		/* Here we actually start the io */
1885		folio = shmem_swapin_cluster(swap, gfp, info, index);
1886		if (!folio) {
1887			error = -ENOMEM;
1888			goto failed;
1889		}
1890	}
1891
1892	/* We have to do this with folio locked to prevent races */
1893	folio_lock(folio);
1894	if (!folio_test_swapcache(folio) ||
1895	    folio->swap.val != swap.val ||
1896	    !shmem_confirm_swap(mapping, index, swap)) {
1897		error = -EEXIST;
1898		goto unlock;
1899	}
1900	if (!folio_test_uptodate(folio)) {
1901		error = -EIO;
1902		goto failed;
1903	}
1904	folio_wait_writeback(folio);
1905
1906	/*
1907	 * Some architectures may have to restore extra metadata to the
1908	 * folio after reading from swap.
1909	 */
1910	arch_swap_restore(swap, folio);
1911
1912	if (shmem_should_replace_folio(folio, gfp)) {
1913		error = shmem_replace_folio(&folio, gfp, info, index);
1914		if (error)
1915			goto failed;
1916	}
1917
1918	error = shmem_add_to_page_cache(folio, mapping, index,
1919					swp_to_radix_entry(swap), gfp);
1920	if (error)
1921		goto failed;
1922
1923	shmem_recalc_inode(inode, 0, -1);
1924
1925	if (sgp == SGP_WRITE)
1926		folio_mark_accessed(folio);
1927
1928	delete_from_swap_cache(folio);
1929	folio_mark_dirty(folio);
1930	swap_free(swap);
1931	put_swap_device(si);
1932
1933	*foliop = folio;
1934	return 0;
1935failed:
1936	if (!shmem_confirm_swap(mapping, index, swap))
1937		error = -EEXIST;
1938	if (error == -EIO)
1939		shmem_set_folio_swapin_error(inode, index, folio, swap);
1940unlock:
1941	if (folio) {
1942		folio_unlock(folio);
1943		folio_put(folio);
1944	}
1945	put_swap_device(si);
1946
1947	return error;
1948}
1949
1950/*
1951 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1952 *
1953 * If we allocate a new one we do not mark it dirty: that's up to the
1954 * vm. If we swap it in we mark it dirty, because we also free the swap
1955 * entry; a page cannot live in both the swap and page cache.
1956 *
1957 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
1958 */
1959static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1960		struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1961		struct vm_fault *vmf, vm_fault_t *fault_type)
1962{
1963	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1964	struct mm_struct *fault_mm;
1965	struct folio *folio;
1966	int error;
1967	bool alloced;
1968
1969	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1970		return -EFBIG;
1971repeat:
1972	if (sgp <= SGP_CACHE &&
1973	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
1974		return -EINVAL;
1975
1976	alloced = false;
1977	fault_mm = vma ? vma->vm_mm : NULL;
1978
1979	folio = filemap_get_entry(inode->i_mapping, index);
1980	if (folio && vma && userfaultfd_minor(vma)) {
1981		if (!xa_is_value(folio))
1982			folio_put(folio);
1983		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1984		return 0;
1985	}
1986
1987	if (xa_is_value(folio)) {
1988		error = shmem_swapin_folio(inode, index, &folio,
1989					   sgp, gfp, fault_mm, fault_type);
1990		if (error == -EEXIST)
1991			goto repeat;
1992
1993		*foliop = folio;
1994		return error;
1995	}
1996
1997	if (folio) {
1998		folio_lock(folio);
1999
2000		/* Has the folio been truncated or swapped out? */
2001		if (unlikely(folio->mapping != inode->i_mapping)) {
2002			folio_unlock(folio);
2003			folio_put(folio);
2004			goto repeat;
2005		}
2006		if (sgp == SGP_WRITE)
2007			folio_mark_accessed(folio);
2008		if (folio_test_uptodate(folio))
2009			goto out;
2010		/* fallocated folio */
2011		if (sgp != SGP_READ)
2012			goto clear;
2013		folio_unlock(folio);
2014		folio_put(folio);
2015	}
2016
2017	/*
2018	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2019	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2020	 */
2021	*foliop = NULL;
2022	if (sgp == SGP_READ)
2023		return 0;
2024	if (sgp == SGP_NOALLOC)
2025		return -ENOENT;
2026
2027	/*
2028	 * Fast cache lookup and swap lookup did not find it: allocate.
2029	 */
2030
2031	if (vma && userfaultfd_missing(vma)) {
2032		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2033		return 0;
2034	}
2035
2036	if (shmem_is_huge(inode, index, false, fault_mm,
2037			  vma ? vma->vm_flags : 0)) {
2038		gfp_t huge_gfp;
2039
2040		huge_gfp = vma_thp_gfp_mask(vma);
2041		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2042		folio = shmem_alloc_and_add_folio(huge_gfp,
2043				inode, index, fault_mm, true);
2044		if (!IS_ERR(folio)) {
2045			count_vm_event(THP_FILE_ALLOC);
2046			goto alloced;
2047		}
2048		if (PTR_ERR(folio) == -EEXIST)
2049			goto repeat;
2050	}
2051
2052	folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
2053	if (IS_ERR(folio)) {
2054		error = PTR_ERR(folio);
2055		if (error == -EEXIST)
2056			goto repeat;
2057		folio = NULL;
2058		goto unlock;
2059	}
2060
2061alloced:
2062	alloced = true;
2063	if (folio_test_pmd_mappable(folio) &&
2064	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2065					folio_next_index(folio) - 1) {
2066		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2067		struct shmem_inode_info *info = SHMEM_I(inode);
2068		/*
2069		 * Part of the large folio is beyond i_size: subject
2070		 * to shrink under memory pressure.
2071		 */
2072		spin_lock(&sbinfo->shrinklist_lock);
2073		/*
2074		 * _careful to defend against unlocked access to
2075		 * ->shrink_list in shmem_unused_huge_shrink()
2076		 */
2077		if (list_empty_careful(&info->shrinklist)) {
2078			list_add_tail(&info->shrinklist,
2079				      &sbinfo->shrinklist);
2080			sbinfo->shrinklist_len++;
2081		}
2082		spin_unlock(&sbinfo->shrinklist_lock);
2083	}
2084
2085	if (sgp == SGP_WRITE)
2086		folio_set_referenced(folio);
2087	/*
2088	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2089	 */
2090	if (sgp == SGP_FALLOC)
2091		sgp = SGP_WRITE;
2092clear:
2093	/*
2094	 * Let SGP_WRITE caller clear ends if write does not fill folio;
2095	 * but SGP_FALLOC on a folio fallocated earlier must initialize
2096	 * it now, lest undo on failure cancel our earlier guarantee.
2097	 */
2098	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2099		long i, n = folio_nr_pages(folio);
2100
2101		for (i = 0; i < n; i++)
2102			clear_highpage(folio_page(folio, i));
2103		flush_dcache_folio(folio);
2104		folio_mark_uptodate(folio);
2105	}
2106
2107	/* Perhaps the file has been truncated since we checked */
2108	if (sgp <= SGP_CACHE &&
2109	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2110		error = -EINVAL;
2111		goto unlock;
2112	}
2113out:
2114	*foliop = folio;
2115	return 0;
2116
2117	/*
2118	 * Error recovery.
2119	 */
2120unlock:
2121	if (alloced)
2122		filemap_remove_folio(folio);
2123	shmem_recalc_inode(inode, 0, 0);
2124	if (folio) {
2125		folio_unlock(folio);
2126		folio_put(folio);
2127	}
2128	return error;
2129}
2130
2131int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
2132		enum sgp_type sgp)
2133{
2134	return shmem_get_folio_gfp(inode, index, foliop, sgp,
2135			mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2136}
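/*
 * Typical in-kernel usage (a sketch; the caller and index are
 * illustrative): on success with a non-NULL folio, the folio comes back
 * locked with a reference held, so the caller must drop both:
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
 *	if (err)
 *		return err;
 *	if (folio) {
 *		// ... use the folio contents ...
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *
 * With SGP_READ, 0 with *foliop == NULL is a legitimate result: the index
 * falls in a hole, and the caller should treat it as zeroes.
 */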
2137
2138/*
2139 * This is like autoremove_wake_function, but it removes the wait queue
2140 * entry unconditionally - even if something else had already woken the
2141 * target.
2142 */
2143static int synchronous_wake_function(wait_queue_entry_t *wait,
2144			unsigned int mode, int sync, void *key)
2145{
2146	int ret = default_wake_function(wait, mode, sync, key);
2147	list_del_init(&wait->entry);
2148	return ret;
2149}
2150
2151/*
2152 * Trinity finds that probing a hole which tmpfs is punching can
2153 * prevent the hole-punch from ever completing: which in turn
2154 * locks writers out with its hold on i_rwsem.  So refrain from
2155 * faulting pages into the hole while it's being punched.  Although
2156 * shmem_undo_range() does remove the additions, it may be unable to
2157 * keep up, as each new page needs its own unmap_mapping_range() call,
2158 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2159 *
2160 * It does not matter if we sometimes reach this check just before the
2161 * hole-punch begins, so that one fault then races with the punch:
2162 * we just need to make racing faults a rare case.
2163 *
2164 * The implementation below would be much simpler if we just used a
2165 * standard mutex or completion: but we cannot take i_rwsem in fault,
2166 * and bloating every shmem inode for this unlikely case would be sad.
2167 */
2168static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2169{
2170	struct shmem_falloc *shmem_falloc;
2171	struct file *fpin = NULL;
2172	vm_fault_t ret = 0;
2173
2174	spin_lock(&inode->i_lock);
2175	shmem_falloc = inode->i_private;
2176	if (shmem_falloc &&
2177	    shmem_falloc->waitq &&
2178	    vmf->pgoff >= shmem_falloc->start &&
2179	    vmf->pgoff < shmem_falloc->next) {
2180		wait_queue_head_t *shmem_falloc_waitq;
2181		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2182
2183		ret = VM_FAULT_NOPAGE;
2184		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2185		shmem_falloc_waitq = shmem_falloc->waitq;
2186		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2187				TASK_UNINTERRUPTIBLE);
2188		spin_unlock(&inode->i_lock);
2189		schedule();
2190
2191		/*
2192		 * shmem_falloc_waitq points into the shmem_fallocate()
2193		 * stack of the hole-punching task: shmem_falloc_waitq
2194		 * is usually invalid by the time we reach here, but
2195		 * finish_wait() does not dereference it in that case;
2196		 * though i_lock needed lest racing with wake_up_all().
2197		 */
2198		spin_lock(&inode->i_lock);
2199		finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2200	}
2201	spin_unlock(&inode->i_lock);
2202	if (fpin) {
2203		fput(fpin);
2204		ret = VM_FAULT_RETRY;
2205	}
2206	return ret;
2207}
2208
2209static vm_fault_t shmem_fault(struct vm_fault *vmf)
2210{
2211	struct inode *inode = file_inode(vmf->vma->vm_file);
2212	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2213	struct folio *folio = NULL;
2214	vm_fault_t ret = 0;
2215	int err;
2216
2217	/*
2218	 * Trinity finds that probing a hole which tmpfs is punching can
2219	 * prevent the hole-punch from ever completing: noted in i_private.
2220	 */
2221	if (unlikely(inode->i_private)) {
2222		ret = shmem_falloc_wait(vmf, inode);
2223		if (ret)
2224			return ret;
2225	}
2226
2227	WARN_ON_ONCE(vmf->page != NULL);
2228	err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2229				  gfp, vmf, &ret);
2230	if (err)
2231		return vmf_error(err);
2232	if (folio) {
2233		vmf->page = folio_file_page(folio, vmf->pgoff);
2234		ret |= VM_FAULT_LOCKED;
2235	}
2236	return ret;
2237}
2238
2239unsigned long shmem_get_unmapped_area(struct file *file,
2240				      unsigned long uaddr, unsigned long len,
2241				      unsigned long pgoff, unsigned long flags)
2242{
2243	unsigned long (*get_area)(struct file *,
2244		unsigned long, unsigned long, unsigned long, unsigned long);
2245	unsigned long addr;
2246	unsigned long offset;
2247	unsigned long inflated_len;
2248	unsigned long inflated_addr;
2249	unsigned long inflated_offset;
2250
2251	if (len > TASK_SIZE)
2252		return -ENOMEM;
2253
2254	get_area = current->mm->get_unmapped_area;
2255	addr = get_area(file, uaddr, len, pgoff, flags);
2256
2257	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2258		return addr;
2259	if (IS_ERR_VALUE(addr))
2260		return addr;
2261	if (addr & ~PAGE_MASK)
2262		return addr;
2263	if (addr > TASK_SIZE - len)
2264		return addr;
2265
2266	if (shmem_huge == SHMEM_HUGE_DENY)
2267		return addr;
2268	if (len < HPAGE_PMD_SIZE)
2269		return addr;
2270	if (flags & MAP_FIXED)
2271		return addr;
2272	/*
2273	 * Our priority is to support MAP_SHARED mapped hugely;
2274	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2275	 * But if caller specified an address hint and we allocated area there
2276	 * successfully, respect that as before.
2277	 */
2278	if (uaddr == addr)
2279		return addr;
2280
2281	if (shmem_huge != SHMEM_HUGE_FORCE) {
2282		struct super_block *sb;
2283
2284		if (file) {
2285			VM_BUG_ON(file->f_op != &shmem_file_operations);
2286			sb = file_inode(file)->i_sb;
2287		} else {
2288			/*
2289			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2290			 * for "/dev/zero", to create a shared anonymous object.
2291			 */
2292			if (IS_ERR(shm_mnt))
2293				return addr;
2294			sb = shm_mnt->mnt_sb;
2295		}
2296		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2297			return addr;
2298	}
2299
2300	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2301	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2302		return addr;
2303	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2304		return addr;
2305
2306	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2307	if (inflated_len > TASK_SIZE)
2308		return addr;
2309	if (inflated_len < len)
2310		return addr;
2311
2312	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2313	if (IS_ERR_VALUE(inflated_addr))
2314		return addr;
2315	if (inflated_addr & ~PAGE_MASK)
2316		return addr;
2317
2318	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2319	inflated_addr += offset - inflated_offset;
2320	if (inflated_offset > offset)
2321		inflated_addr += HPAGE_PMD_SIZE;
2322
2323	if (inflated_addr > TASK_SIZE - len)
2324		return addr;
2325	return inflated_addr;
2326}
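/*
 * Worked example of the inflation above (illustrative numbers, 2MB PMD
 * size): a 4MB request whose first attempt returned an address that does
 * not match the file offset modulo 2MB is retried with
 *
 *	inflated_len = 4MB + 2MB - 4KB;
 *
 * Any area that large contains a sub-range starting at the required
 * offset modulo 2MB, so the mapping can then be placed where the file's
 * huge pages are PMD-mappable.
 */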
2327
2328#ifdef CONFIG_NUMA
2329static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2330{
2331	struct inode *inode = file_inode(vma->vm_file);
2332	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2333}
2334
2335static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2336					  unsigned long addr, pgoff_t *ilx)
2337{
2338	struct inode *inode = file_inode(vma->vm_file);
2339	pgoff_t index;
2340
2341	/*
2342	 * Bias interleave by inode number to distribute better across nodes;
2343	 * but this interface is independent of which page order is used, so
2344	 * supplies only that bias, letting caller apply the offset (adjusted
2345	 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2346	 */
2347	*ilx = inode->i_ino;
2348	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2349	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2350}
2351
2352static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2353			pgoff_t index, unsigned int order, pgoff_t *ilx)
2354{
2355	struct mempolicy *mpol;
2356
2357	/* Bias interleave by inode number to distribute better across nodes */
2358	*ilx = info->vfs_inode.i_ino + (index >> order);
2359
2360	mpol = mpol_shared_policy_lookup(&info->policy, index);
2361	return mpol ? mpol : get_task_policy(current);
2362}
2363#else
2364static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2365			pgoff_t index, unsigned int order, pgoff_t *ilx)
2366{
2367	*ilx = 0;
2368	return NULL;
2369}
2370#endif /* CONFIG_NUMA */
2371
2372int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2373{
2374	struct inode *inode = file_inode(file);
2375	struct shmem_inode_info *info = SHMEM_I(inode);
2376	int retval = -ENOMEM;
2377
2378	/*
2379	 * What serializes the accesses to info->flags?
2380	 * ipc_lock_object() when called from shmctl_do_lock(),
2381	 * no serialization needed when called from shm_destroy().
2382	 */
2383	if (lock && !(info->flags & VM_LOCKED)) {
2384		if (!user_shm_lock(inode->i_size, ucounts))
2385			goto out_nomem;
2386		info->flags |= VM_LOCKED;
2387		mapping_set_unevictable(file->f_mapping);
2388	}
2389	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2390		user_shm_unlock(inode->i_size, ucounts);
2391		info->flags &= ~VM_LOCKED;
2392		mapping_clear_unevictable(file->f_mapping);
2393	}
2394	retval = 0;
2395
2396out_nomem:
2397	return retval;
2398}
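/*
 * Illustrative callers (a sketch of ipc/shm.c, not verbatim):
 *
 *	shmem_lock(shm_file, 1, ucounts);	// shmctl(id, SHM_LOCK, ..)
 *	shmem_lock(shm_file, 0, ucounts);	// shmctl(id, SHM_UNLOCK, ..)
 *
 * Note that "locking" here only marks the mapping unevictable and charges
 * the caller's RLIMIT_MEMLOCK ucounts: it does not fault the pages in.
 */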
2399
2400static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2401{
2402	struct inode *inode = file_inode(file);
2403	struct shmem_inode_info *info = SHMEM_I(inode);
2404	int ret;
2405
2406	ret = seal_check_write(info->seals, vma);
2407	if (ret)
2408		return ret;
2409
2410	/* arm64 - allow memory tagging on RAM-based files */
2411	vm_flags_set(vma, VM_MTE_ALLOWED);
2412
2413	file_accessed(file);
2414	/* This is anonymous shared memory if it is unlinked at the time of mmap */
2415	if (inode->i_nlink)
2416		vma->vm_ops = &shmem_vm_ops;
2417	else
2418		vma->vm_ops = &shmem_anon_vm_ops;
2419	return 0;
2420}
2421
2422static int shmem_file_open(struct inode *inode, struct file *file)
2423{
2424	file->f_mode |= FMODE_CAN_ODIRECT;
2425	return generic_file_open(inode, file);
2426}
2427
2428#ifdef CONFIG_TMPFS_XATTR
2429static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2430
2431/*
2432 * chattr's fsflags are unrelated to extended attributes,
2433 * but tmpfs has chosen to enable them under the same config option.
2434 */
2435static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2436{
2437	unsigned int i_flags = 0;
2438
2439	if (fsflags & FS_NOATIME_FL)
2440		i_flags |= S_NOATIME;
2441	if (fsflags & FS_APPEND_FL)
2442		i_flags |= S_APPEND;
2443	if (fsflags & FS_IMMUTABLE_FL)
2444		i_flags |= S_IMMUTABLE;
2445	/*
2446	 * But FS_NODUMP_FL does not require any action in i_flags.
2447	 */
2448	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2449}
2450#else
2451static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2452{
2453}
2454#define shmem_initxattrs NULL
2455#endif
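/*
 * Example of how fsflags arrive here (a userspace sketch; fd is
 * illustrative): chattr(1) drives the FS_IOC_[GS]ETFLAGS ioctls:
 *
 *	unsigned int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);	// reaches the mapping above
 */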
2456
2457static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2458{
2459	return &SHMEM_I(inode)->dir_offsets;
2460}
2461
2462static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2463					     struct super_block *sb,
2464					     struct inode *dir, umode_t mode,
2465					     dev_t dev, unsigned long flags)
2466{
2467	struct inode *inode;
2468	struct shmem_inode_info *info;
2469	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2470	ino_t ino;
2471	int err;
2472
2473	err = shmem_reserve_inode(sb, &ino);
2474	if (err)
2475		return ERR_PTR(err);
2476
2477	inode = new_inode(sb);
2478	if (!inode) {
2479		shmem_free_inode(sb, 0);
2480		return ERR_PTR(-ENOSPC);
2481	}
2482
2483	inode->i_ino = ino;
2484	inode_init_owner(idmap, inode, dir, mode);
2485	inode->i_blocks = 0;
2486	simple_inode_init_ts(inode);
2487	inode->i_generation = get_random_u32();
2488	info = SHMEM_I(inode);
2489	memset(info, 0, (char *)inode - (char *)info);
2490	spin_lock_init(&info->lock);
2491	atomic_set(&info->stop_eviction, 0);
2492	info->seals = F_SEAL_SEAL;
2493	info->flags = flags & VM_NORESERVE;
2494	info->i_crtime = inode_get_mtime(inode);
2495	info->fsflags = (dir == NULL) ? 0 :
2496		SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2497	if (info->fsflags)
2498		shmem_set_inode_flags(inode, info->fsflags);
2499	INIT_LIST_HEAD(&info->shrinklist);
2500	INIT_LIST_HEAD(&info->swaplist);
2501	simple_xattrs_init(&info->xattrs);
2502	cache_no_acl(inode);
2503	if (sbinfo->noswap)
2504		mapping_set_unevictable(inode->i_mapping);
2505	mapping_set_large_folios(inode->i_mapping);
2506
2507	switch (mode & S_IFMT) {
2508	default:
2509		inode->i_op = &shmem_special_inode_operations;
2510		init_special_inode(inode, mode, dev);
2511		break;
2512	case S_IFREG:
2513		inode->i_mapping->a_ops = &shmem_aops;
2514		inode->i_op = &shmem_inode_operations;
2515		inode->i_fop = &shmem_file_operations;
2516		mpol_shared_policy_init(&info->policy,
2517					 shmem_get_sbmpol(sbinfo));
2518		break;
2519	case S_IFDIR:
2520		inc_nlink(inode);
2521		/* Some things misbehave if size == 0 on a directory */
2522		inode->i_size = 2 * BOGO_DIRENT_SIZE;
2523		inode->i_op = &shmem_dir_inode_operations;
2524		inode->i_fop = &simple_offset_dir_operations;
2525		simple_offset_init(shmem_get_offset_ctx(inode));
2526		break;
2527	case S_IFLNK:
2528		/*
2529		 * Must not load anything in the rbtree,
2530		 * mpol_free_shared_policy will not be called.
2531		 */
2532		mpol_shared_policy_init(&info->policy, NULL);
2533		break;
2534	}
2535
2536	lockdep_annotate_inode_mutex_key(inode);
2537	return inode;
2538}
2539
2540#ifdef CONFIG_TMPFS_QUOTA
2541static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2542				     struct super_block *sb, struct inode *dir,
2543				     umode_t mode, dev_t dev, unsigned long flags)
2544{
2545	int err;
2546	struct inode *inode;
2547
2548	inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2549	if (IS_ERR(inode))
2550		return inode;
2551
2552	err = dquot_initialize(inode);
2553	if (err)
2554		goto errout;
2555
2556	err = dquot_alloc_inode(inode);
2557	if (err) {
2558		dquot_drop(inode);
2559		goto errout;
2560	}
2561	return inode;
2562
2563errout:
2564	inode->i_flags |= S_NOQUOTA;
2565	iput(inode);
2566	return ERR_PTR(err);
2567}
2568#else
2569static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2570				     struct super_block *sb, struct inode *dir,
2571				     umode_t mode, dev_t dev, unsigned long flags)
2572{
2573	return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2574}
2575#endif /* CONFIG_TMPFS_QUOTA */
2576
2577#ifdef CONFIG_USERFAULTFD
2578int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2579			   struct vm_area_struct *dst_vma,
2580			   unsigned long dst_addr,
2581			   unsigned long src_addr,
2582			   uffd_flags_t flags,
2583			   struct folio **foliop)
2584{
2585	struct inode *inode = file_inode(dst_vma->vm_file);
2586	struct shmem_inode_info *info = SHMEM_I(inode);
2587	struct address_space *mapping = inode->i_mapping;
2588	gfp_t gfp = mapping_gfp_mask(mapping);
2589	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2590	void *page_kaddr;
2591	struct folio *folio;
2592	int ret;
2593	pgoff_t max_off;
2594
2595	if (shmem_inode_acct_blocks(inode, 1)) {
2596		/*
2597		 * We may have got a page, returned -ENOENT triggering a retry,
2598		 * and now we find ourselves with -ENOMEM. Release the page, to
2599		 * avoid a BUG_ON in our caller.
2600		 */
2601		if (unlikely(*foliop)) {
2602			folio_put(*foliop);
2603			*foliop = NULL;
2604		}
2605		return -ENOMEM;
2606	}
2607
2608	if (!*foliop) {
2609		ret = -ENOMEM;
2610		folio = shmem_alloc_folio(gfp, info, pgoff);
2611		if (!folio)
2612			goto out_unacct_blocks;
2613
2614		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
2615			page_kaddr = kmap_local_folio(folio, 0);
2616			/*
2617			 * The read mmap_lock is held here.  Despite the
2618			 * mmap_lock being read recursive a deadlock is still
2619			 * possible if a writer has taken a lock.  For example:
2620			 *
2621			 * process A thread 1 takes read lock on own mmap_lock
2622			 * process A thread 2 calls mmap, blocks taking write lock
2623			 * process B thread 1 takes page fault, read lock on own mmap lock
2624			 * process B thread 2 calls mmap, blocks taking write lock
2625			 * process A thread 1 blocks taking read lock on process B
2626			 * process B thread 1 blocks taking read lock on process A
2627			 *
2628			 * Disable page faults to prevent potential deadlock
2629			 * and retry the copy outside the mmap_lock.
2630			 */
2631			pagefault_disable();
2632			ret = copy_from_user(page_kaddr,
2633					     (const void __user *)src_addr,
2634					     PAGE_SIZE);
2635			pagefault_enable();
2636			kunmap_local(page_kaddr);
2637
2638			/* fallback to copy_from_user outside mmap_lock */
2639			if (unlikely(ret)) {
2640				*foliop = folio;
2641				ret = -ENOENT;
2642				/* don't free the page */
2643				goto out_unacct_blocks;
2644			}
2645
2646			flush_dcache_folio(folio);
2647		} else {		/* ZEROPAGE */
2648			clear_user_highpage(&folio->page, dst_addr);
2649		}
2650	} else {
2651		folio = *foliop;
2652		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2653		*foliop = NULL;
2654	}
2655
2656	VM_BUG_ON(folio_test_locked(folio));
2657	VM_BUG_ON(folio_test_swapbacked(folio));
2658	__folio_set_locked(folio);
2659	__folio_set_swapbacked(folio);
2660	__folio_mark_uptodate(folio);
2661
2662	ret = -EFAULT;
2663	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2664	if (unlikely(pgoff >= max_off))
2665		goto out_release;
2666
2667	ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
2668	if (ret)
2669		goto out_release;
2670	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
2671	if (ret)
2672		goto out_release;
2673
2674	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2675				       &folio->page, true, flags);
2676	if (ret)
2677		goto out_delete_from_cache;
2678
2679	shmem_recalc_inode(inode, 1, 0);
2680	folio_unlock(folio);
2681	return 0;
2682out_delete_from_cache:
2683	filemap_remove_folio(folio);
2684out_release:
2685	folio_unlock(folio);
2686	folio_put(folio);
2687out_unacct_blocks:
2688	shmem_inode_unacct_blocks(inode, 1);
2689	return ret;
2690}
2691#endif /* CONFIG_USERFAULTFD */
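/*
 * Userspace counterpart (a sketch; uffd, fault_addr and src_buf are
 * illustrative): shmem_mfill_atomic_pte() runs when a userfaultfd monitor
 * resolves a missing-page fault on a tmpfs mapping:
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(unsigned long)(PAGE_SIZE - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = PAGE_SIZE,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * UFFDIO_ZEROPAGE takes the MFILL_ATOMIC_ZEROPAGE branch instead of the
 * copy_from_user() path.
 */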
2692
2693#ifdef CONFIG_TMPFS
2694static const struct inode_operations shmem_symlink_inode_operations;
2695static const struct inode_operations shmem_short_symlink_operations;
2696
2697static int
2698shmem_write_begin(struct file *file, struct address_space *mapping,
2699			loff_t pos, unsigned len,
2700			struct page **pagep, void **fsdata)
2701{
2702	struct inode *inode = mapping->host;
2703	struct shmem_inode_info *info = SHMEM_I(inode);
2704	pgoff_t index = pos >> PAGE_SHIFT;
2705	struct folio *folio;
2706	int ret = 0;
2707
2708	/* i_rwsem is held by caller */
2709	if (unlikely(info->seals & (F_SEAL_GROW |
2710				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2711		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2712			return -EPERM;
2713		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2714			return -EPERM;
2715	}
2716
2717	ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2718	if (ret)
2719		return ret;
2720
2721	*pagep = folio_file_page(folio, index);
2722	if (PageHWPoison(*pagep)) {
2723		folio_unlock(folio);
2724		folio_put(folio);
2725		*pagep = NULL;
2726		return -EIO;
2727	}
2728
2729	return 0;
2730}
2731
2732static int
2733shmem_write_end(struct file *file, struct address_space *mapping,
2734			loff_t pos, unsigned len, unsigned copied,
2735			struct page *page, void *fsdata)
2736{
2737	struct folio *folio = page_folio(page);
2738	struct inode *inode = mapping->host;
2739
2740	if (pos + copied > inode->i_size)
2741		i_size_write(inode, pos + copied);
2742
2743	if (!folio_test_uptodate(folio)) {
2744		if (copied < folio_size(folio)) {
2745			size_t from = offset_in_folio(folio, pos);
2746			folio_zero_segments(folio, 0, from,
2747					from + copied, folio_size(folio));
2748		}
2749		folio_mark_uptodate(folio);
2750	}
2751	folio_mark_dirty(folio);
2752	folio_unlock(folio);
2753	folio_put(folio);
2754
2755	return copied;
2756}
2757
2758static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2759{
2760	struct file *file = iocb->ki_filp;
2761	struct inode *inode = file_inode(file);
2762	struct address_space *mapping = inode->i_mapping;
2763	pgoff_t index;
2764	unsigned long offset;
2765	int error = 0;
2766	ssize_t retval = 0;
2767	loff_t *ppos = &iocb->ki_pos;
2768
2769	index = *ppos >> PAGE_SHIFT;
2770	offset = *ppos & ~PAGE_MASK;
2771
2772	for (;;) {
2773		struct folio *folio = NULL;
2774		struct page *page = NULL;
2775		pgoff_t end_index;
2776		unsigned long nr, ret;
2777		loff_t i_size = i_size_read(inode);
2778
2779		end_index = i_size >> PAGE_SHIFT;
2780		if (index > end_index)
2781			break;
2782		if (index == end_index) {
2783			nr = i_size & ~PAGE_MASK;
2784			if (nr <= offset)
2785				break;
2786		}
2787
2788		error = shmem_get_folio(inode, index, &folio, SGP_READ);
2789		if (error) {
2790			if (error == -EINVAL)
2791				error = 0;
2792			break;
2793		}
2794		if (folio) {
2795			folio_unlock(folio);
2796
2797			page = folio_file_page(folio, index);
2798			if (PageHWPoison(page)) {
2799				folio_put(folio);
2800				error = -EIO;
2801				break;
2802			}
2803		}
2804
2805		/*
2806		 * We must evaluate after, since reads (unlike writes)
2807		 * are called without i_rwsem protection against truncate
2808		 */
2809		nr = PAGE_SIZE;
2810		i_size = i_size_read(inode);
2811		end_index = i_size >> PAGE_SHIFT;
2812		if (index == end_index) {
2813			nr = i_size & ~PAGE_MASK;
2814			if (nr <= offset) {
2815				if (folio)
2816					folio_put(folio);
2817				break;
2818			}
2819		}
2820		nr -= offset;
2821
2822		if (folio) {
2823			/*
2824			 * If users can be writing to this page using arbitrary
2825			 * virtual addresses, take care about potential aliasing
2826			 * before reading the page on the kernel side.
2827			 */
2828			if (mapping_writably_mapped(mapping))
2829				flush_dcache_page(page);
2830			/*
2831			 * Mark the page accessed if we read the beginning.
2832			 */
2833			if (!offset)
2834				folio_mark_accessed(folio);
2835			/*
2836			 * Ok, we have the page, and it's up-to-date, so
2837			 * now we can copy it to user space...
2838			 */
2839			ret = copy_page_to_iter(page, offset, nr, to);
2840			folio_put(folio);
2841
2842		} else if (user_backed_iter(to)) {
2843			/*
2844			 * Copy-to-user is so well optimized, and clear_user()
2845			 * so much less so, that it is noticeably faster to
2846			 * copy the zero page instead of clearing.
2847			 */
2848			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
2849		} else {
2850			/*
2851			 * But submitting the same page twice in a row to
2852			 * splice() - or others? - can result in confusion:
2853			 * so don't attempt that optimization on pipes etc.
2854			 */
2855			ret = iov_iter_zero(nr, to);
2856		}
2857
2858		retval += ret;
2859		offset += ret;
2860		index += offset >> PAGE_SHIFT;
2861		offset &= ~PAGE_MASK;
2862
2863		if (!iov_iter_count(to))
2864			break;
2865		if (ret < nr) {
2866			error = -EFAULT;
2867			break;
2868		}
2869		cond_resched();
2870	}
2871
2872	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2873	file_accessed(file);
2874	return retval ? retval : error;
2875}
2876
2877static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2878{
2879	struct file *file = iocb->ki_filp;
2880	struct inode *inode = file->f_mapping->host;
2881	ssize_t ret;
2882
2883	inode_lock(inode);
2884	ret = generic_write_checks(iocb, from);
2885	if (ret <= 0)
2886		goto unlock;
2887	ret = file_remove_privs(file);
2888	if (ret)
2889		goto unlock;
2890	ret = file_update_time(file);
2891	if (ret)
2892		goto unlock;
2893	ret = generic_perform_write(iocb, from);
2894unlock:
2895	inode_unlock(inode);
2896	return ret;
2897}
2898
2899static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2900			      struct pipe_buffer *buf)
2901{
2902	return true;
2903}
2904
2905static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2906				  struct pipe_buffer *buf)
2907{
2908}
2909
2910static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
2911				    struct pipe_buffer *buf)
2912{
2913	return false;
2914}
2915
2916static const struct pipe_buf_operations zero_pipe_buf_ops = {
2917	.release	= zero_pipe_buf_release,
2918	.try_steal	= zero_pipe_buf_try_steal,
2919	.get		= zero_pipe_buf_get,
2920};
2921
2922static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
2923					loff_t fpos, size_t size)
2924{
2925	size_t offset = fpos & ~PAGE_MASK;
2926
2927	size = min_t(size_t, size, PAGE_SIZE - offset);
2928
2929	if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2930		struct pipe_buffer *buf = pipe_head_buf(pipe);
2931
2932		*buf = (struct pipe_buffer) {
2933			.ops	= &zero_pipe_buf_ops,
2934			.page	= ZERO_PAGE(0),
2935			.offset	= offset,
2936			.len	= size,
2937		};
2938		pipe->head++;
2939	}
2940
2941	return size;
2942}
2943
2944static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
2945				      struct pipe_inode_info *pipe,
2946				      size_t len, unsigned int flags)
2947{
2948	struct inode *inode = file_inode(in);
2949	struct address_space *mapping = inode->i_mapping;
2950	struct folio *folio = NULL;
2951	size_t total_spliced = 0, used, npages, n, part;
2952	loff_t isize;
2953	int error = 0;
2954
2955	/* Work out how much data we can actually add into the pipe */
2956	used = pipe_occupancy(pipe->head, pipe->tail);
2957	npages = max_t(ssize_t, pipe->max_usage - used, 0);
2958	len = min_t(size_t, len, npages * PAGE_SIZE);
2959
2960	do {
2961		if (*ppos >= i_size_read(inode))
2962			break;
2963
2964		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2965					SGP_READ);
2966		if (error) {
2967			if (error == -EINVAL)
2968				error = 0;
2969			break;
2970		}
2971		if (folio) {
2972			folio_unlock(folio);
2973
2974			if (folio_test_hwpoison(folio) ||
2975			    (folio_test_large(folio) &&
2976			     folio_test_has_hwpoisoned(folio))) {
2977				error = -EIO;
2978				break;
2979			}
2980		}
2981
2982		/*
2983		 * i_size must be checked after we know the pages are Uptodate.
2984		 *
2985		 * Checking i_size after the uptodate check allows us to
2986		 * calculate the correct value for "part", so the zero-filled
2987		 * part of the page is not copied back to userspace (unless
2988		 * another truncate extends the file - this is desired though).
2989		 */
2990		isize = i_size_read(inode);
2991		if (unlikely(*ppos >= isize))
2992			break;
2993		part = min_t(loff_t, isize - *ppos, len);
2994
2995		if (folio) {
2996			/*
2997			 * If users can be writing to this page using arbitrary
2998			 * virtual addresses, take care about potential aliasing
2999			 * before reading the page on the kernel side.
3000			 */
3001			if (mapping_writably_mapped(mapping))
3002				flush_dcache_folio(folio);
3003			folio_mark_accessed(folio);
3004			/*
3005			 * Ok, we have the page, and it's up-to-date, so we can
3006			 * now splice it into the pipe.
3007			 */
3008			n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3009			folio_put(folio);
3010			folio = NULL;
3011		} else {
3012			n = splice_zeropage_into_pipe(pipe, *ppos, part);
3013		}
3014
3015		if (!n)
3016			break;
3017		len -= n;
3018		total_spliced += n;
3019		*ppos += n;
3020		in->f_ra.prev_pos = *ppos;
3021		if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3022			break;
3023
3024		cond_resched();
3025	} while (len);
3026
3027	if (folio)
3028		folio_put(folio);
3029
3030	file_accessed(in);
3031	return total_spliced ? total_spliced : error;
3032}
3033
3034static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3035{
3036	struct address_space *mapping = file->f_mapping;
3037	struct inode *inode = mapping->host;
3038
3039	if (whence != SEEK_DATA && whence != SEEK_HOLE)
3040		return generic_file_llseek_size(file, offset, whence,
3041					MAX_LFS_FILESIZE, i_size_read(inode));
3042	if (offset < 0)
3043		return -ENXIO;
3044
3045	inode_lock(inode);
3046	/* We're holding i_rwsem so we can access i_size directly */
3047	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3048	if (offset >= 0)
3049		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3050	inode_unlock(inode);
3051	return offset;
3052}
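/*
 * Example (a sketch; fd is illustrative): because tmpfs supports
 * SEEK_DATA/SEEK_HOLE, sparse-aware tools can skip holes cheaply:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first allocated offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that extent
 *
 * lseek() fails with ENXIO once the offset passes EOF (or, for
 * SEEK_DATA, when no further data exists).
 */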
3053
3054static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3055							 loff_t len)
3056{
3057	struct inode *inode = file_inode(file);
3058	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3059	struct shmem_inode_info *info = SHMEM_I(inode);
3060	struct shmem_falloc shmem_falloc;
3061	pgoff_t start, index, end, undo_fallocend;
3062	int error;
3063
3064	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3065		return -EOPNOTSUPP;
3066
3067	inode_lock(inode);
3068
3069	if (mode & FALLOC_FL_PUNCH_HOLE) {
3070		struct address_space *mapping = file->f_mapping;
3071		loff_t unmap_start = round_up(offset, PAGE_SIZE);
3072		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3073		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3074
3075		/* protected by i_rwsem */
3076		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3077			error = -EPERM;
3078			goto out;
3079		}
3080
3081		shmem_falloc.waitq = &shmem_falloc_waitq;
3082		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3083		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3084		spin_lock(&inode->i_lock);
3085		inode->i_private = &shmem_falloc;
3086		spin_unlock(&inode->i_lock);
3087
3088		if ((u64)unmap_end > (u64)unmap_start)
3089			unmap_mapping_range(mapping, unmap_start,
3090					    1 + unmap_end - unmap_start, 0);
3091		shmem_truncate_range(inode, offset, offset + len - 1);
3092		/* No need to unmap again: hole-punching leaves COWed pages */
3093
3094		spin_lock(&inode->i_lock);
3095		inode->i_private = NULL;
3096		wake_up_all(&shmem_falloc_waitq);
3097		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3098		spin_unlock(&inode->i_lock);
3099		error = 0;
3100		goto out;
3101	}
3102
3103	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3104	error = inode_newsize_ok(inode, offset + len);
3105	if (error)
3106		goto out;
3107
3108	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3109		error = -EPERM;
3110		goto out;
3111	}
3112
3113	start = offset >> PAGE_SHIFT;
3114	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3115	/* Try to avoid a swapstorm if len is impossible to satisfy */
3116	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3117		error = -ENOSPC;
3118		goto out;
3119	}
3120
3121	shmem_falloc.waitq = NULL;
3122	shmem_falloc.start = start;
3123	shmem_falloc.next  = start;
3124	shmem_falloc.nr_falloced = 0;
3125	shmem_falloc.nr_unswapped = 0;
3126	spin_lock(&inode->i_lock);
3127	inode->i_private = &shmem_falloc;
3128	spin_unlock(&inode->i_lock);
3129
3130	/*
3131	 * info->fallocend is only relevant when huge pages might be
3132	 * involved: to prevent split_huge_page() freeing fallocated
3133	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3134	 */
3135	undo_fallocend = info->fallocend;
3136	if (info->fallocend < end)
3137		info->fallocend = end;
3138
3139	for (index = start; index < end; ) {
3140		struct folio *folio;
3141
3142		/*
3143		 * Good, the fallocate(2) manpage permits EINTR: we may have
3144		 * been interrupted because we are using up too much memory.
3145		 */
3146		if (signal_pending(current))
3147			error = -EINTR;
3148		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3149			error = -ENOMEM;
3150		else
3151			error = shmem_get_folio(inode, index, &folio,
3152						SGP_FALLOC);
3153		if (error) {
3154			info->fallocend = undo_fallocend;
3155			/* Remove the !uptodate folios we added */
3156			if (index > start) {
3157				shmem_undo_range(inode,
3158				    (loff_t)start << PAGE_SHIFT,
3159				    ((loff_t)index << PAGE_SHIFT) - 1, true);
3160			}
3161			goto undone;
3162		}
3163
3164		/*
3165		 * Here is a more important optimization than it appears:
3166		 * a second SGP_FALLOC on the same large folio will clear it,
3167		 * making it uptodate and un-undoable if we fail later.
3168		 */
3169		index = folio_next_index(folio);
3170		/* Beware 32-bit wraparound */
3171		if (!index)
3172			index--;
3173
3174		/*
3175		 * Inform shmem_writepage() how far we have reached.
3176		 * No need for lock or barrier: we have the page lock.
3177		 */
3178		if (!folio_test_uptodate(folio))
3179			shmem_falloc.nr_falloced += index - shmem_falloc.next;
3180		shmem_falloc.next = index;
3181
3182		/*
3183		 * If !uptodate, leave it that way so that freeable folios
3184		 * can be recognized if we need to roll back on error later.
3185		 * But mark it dirty so that memory pressure will swap rather
3186		 * than free the folios we are allocating (and SGP_CACHE folios
3187		 * might still be clean: we now need to mark those dirty too).
3188		 */
3189		folio_mark_dirty(folio);
3190		folio_unlock(folio);
3191		folio_put(folio);
3192		cond_resched();
3193	}
3194
3195	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3196		i_size_write(inode, offset + len);
3197undone:
3198	spin_lock(&inode->i_lock);
3199	inode->i_private = NULL;
3200	spin_unlock(&inode->i_lock);
3201out:
3202	if (!error)
3203		file_modified(file);
3204	inode_unlock(inode);
3205	return error;
3206}
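
/*
 * Illustrative userspace sketch of the two modes handled above; assumes
 * "fd" is open O_RDWR on a tmpfs file.  Note the VFS only accepts
 * FALLOC_FL_PUNCH_HOLE in combination with FALLOC_FL_KEEP_SIZE.
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);
 *
 * The first call preallocates 1MiB without changing i_size; the second
 * punches the same range back out, freeing its pages.
 */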
3207
3208static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3209{
3210	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3211
3212	buf->f_type = TMPFS_MAGIC;
3213	buf->f_bsize = PAGE_SIZE;
3214	buf->f_namelen = NAME_MAX;
3215	if (sbinfo->max_blocks) {
3216		buf->f_blocks = sbinfo->max_blocks;
3217		buf->f_bavail =
3218		buf->f_bfree  = sbinfo->max_blocks -
3219				percpu_counter_sum(&sbinfo->used_blocks);
3220	}
3221	if (sbinfo->max_inodes) {
3222		buf->f_files = sbinfo->max_inodes;
3223		buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3224	}
3225	/* else leave those fields 0 like simple_statfs */
3226
3227	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3228
3229	return 0;
3230}
3231
3232/*
3233 * File creation. Allocate an inode, and we're done.
3234 */
3235static int
3236shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3237	    struct dentry *dentry, umode_t mode, dev_t dev)
3238{
3239	struct inode *inode;
3240	int error;
3241
3242	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3243	if (IS_ERR(inode))
3244		return PTR_ERR(inode);
3245
3246	error = simple_acl_create(dir, inode);
3247	if (error)
3248		goto out_iput;
3249	error = security_inode_init_security(inode, dir, &dentry->d_name,
3250					     shmem_initxattrs, NULL);
3251	if (error && error != -EOPNOTSUPP)
3252		goto out_iput;
3253
3254	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3255	if (error)
3256		goto out_iput;
3257
3258	dir->i_size += BOGO_DIRENT_SIZE;
3259	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3260	inode_inc_iversion(dir);
3261	d_instantiate(dentry, inode);
3262	dget(dentry); /* Extra count - pin the dentry in core */
3263	return error;
3264
3265out_iput:
3266	iput(inode);
3267	return error;
3268}
3269
3270static int
3271shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3272	      struct file *file, umode_t mode)
3273{
3274	struct inode *inode;
3275	int error;
3276
3277	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3278	if (IS_ERR(inode)) {
3279		error = PTR_ERR(inode);
3280		goto err_out;
3281	}
3282	error = security_inode_init_security(inode, dir, NULL,
3283					     shmem_initxattrs, NULL);
3284	if (error && error != -EOPNOTSUPP)
3285		goto out_iput;
3286	error = simple_acl_create(dir, inode);
3287	if (error)
3288		goto out_iput;
3289	d_tmpfile(file, inode);
3290
3291err_out:
3292	return finish_open_simple(file, error);
3293out_iput:
3294	iput(inode);
3295	return error;
3296}
3297
3298static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3299		       struct dentry *dentry, umode_t mode)
3300{
3301	int error;
3302
3303	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3304	if (error)
3305		return error;
3306	inc_nlink(dir);
3307	return 0;
3308}
3309
3310static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3311			struct dentry *dentry, umode_t mode, bool excl)
3312{
3313	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3314}
3315
3316/*
3317 * Link a file.
3318 */
3319static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3320		      struct dentry *dentry)
3321{
3322	struct inode *inode = d_inode(old_dentry);
3323	int ret = 0;
3324
3325	/*
3326	 * No ordinary (disk-based) filesystem counts links as inodes;
3327	 * but each new link needs a new dentry, pinning lowmem, and
3328	 * tmpfs dentries cannot be pruned until they are unlinked.
3329	 * But if an O_TMPFILE file is linked into the tmpfs, the
3330	 * first link must skip that, to get the accounting right.
3331	 */
3332	if (inode->i_nlink) {
3333		ret = shmem_reserve_inode(inode->i_sb, NULL);
3334		if (ret)
3335			goto out;
3336	}
3337
3338	ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3339	if (ret) {
3340		if (inode->i_nlink)
3341			shmem_free_inode(inode->i_sb, 0);
3342		goto out;
3343	}
3344
3345	dir->i_size += BOGO_DIRENT_SIZE;
3346	inode_set_mtime_to_ts(dir,
3347			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3348	inode_inc_iversion(dir);
3349	inc_nlink(inode);
3350	ihold(inode);	/* New dentry reference */
3351	dget(dentry);	/* Extra pinning count for the created dentry */
3352	d_instantiate(dentry, inode);
3353out:
3354	return ret;
3355}
3356
3357static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3358{
3359	struct inode *inode = d_inode(dentry);
3360
3361	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3362		shmem_free_inode(inode->i_sb, 0);
3363
3364	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3365
3366	dir->i_size -= BOGO_DIRENT_SIZE;
3367	inode_set_mtime_to_ts(dir,
3368			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3369	inode_inc_iversion(dir);
3370	drop_nlink(inode);
3371	dput(dentry);	/* Undo the count from "create" - does all the work */
3372	return 0;
3373}
3374
3375static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3376{
3377	if (!simple_empty(dentry))
3378		return -ENOTEMPTY;
3379
3380	drop_nlink(d_inode(dentry));
3381	drop_nlink(dir);
3382	return shmem_unlink(dir, dentry);
3383}
3384
3385static int shmem_whiteout(struct mnt_idmap *idmap,
3386			  struct inode *old_dir, struct dentry *old_dentry)
3387{
3388	struct dentry *whiteout;
3389	int error;
3390
3391	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3392	if (!whiteout)
3393		return -ENOMEM;
3394
3395	error = shmem_mknod(idmap, old_dir, whiteout,
3396			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3397	dput(whiteout);
3398	if (error)
3399		return error;
3400
3401	/*
3402	 * Cheat and hash the whiteout while the old dentry is still in
3403	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3404	 *
3405	 * d_lookup() will consistently find one of them at this point,
3406	 * not sure which one, but that isn't even important.
3407	 */
3408	d_rehash(whiteout);
3409	return 0;
3410}
3411
3412/*
3413 * The VFS layer already does all the dentry stuff for rename,
3414 * we just have to decrement the usage count for the target if
3415 * it exists so that the VFS layer correctly frees it when it
3416 * gets overwritten.
3417 */
3418static int shmem_rename2(struct mnt_idmap *idmap,
3419			 struct inode *old_dir, struct dentry *old_dentry,
3420			 struct inode *new_dir, struct dentry *new_dentry,
3421			 unsigned int flags)
3422{
3423	struct inode *inode = d_inode(old_dentry);
3424	int they_are_dirs = S_ISDIR(inode->i_mode);
3425	int error;
3426
3427	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3428		return -EINVAL;
3429
3430	if (flags & RENAME_EXCHANGE)
3431		return simple_offset_rename_exchange(old_dir, old_dentry,
3432						     new_dir, new_dentry);
3433
3434	if (!simple_empty(new_dentry))
3435		return -ENOTEMPTY;
3436
3437	if (flags & RENAME_WHITEOUT) {
3438		error = shmem_whiteout(idmap, old_dir, old_dentry);
3439		if (error)
3440			return error;
3441	}
3442
3443	simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
3444	error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
3445	if (error)
3446		return error;
3447
3448	if (d_really_is_positive(new_dentry)) {
3449		(void) shmem_unlink(new_dir, new_dentry);
3450		if (they_are_dirs) {
3451			drop_nlink(d_inode(new_dentry));
3452			drop_nlink(old_dir);
3453		}
3454	} else if (they_are_dirs) {
3455		drop_nlink(old_dir);
3456		inc_nlink(new_dir);
3457	}
3458
3459	old_dir->i_size -= BOGO_DIRENT_SIZE;
3460	new_dir->i_size += BOGO_DIRENT_SIZE;
3461	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
3462	inode_inc_iversion(old_dir);
3463	inode_inc_iversion(new_dir);
3464	return 0;
3465}
3466
3467static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3468			 struct dentry *dentry, const char *symname)
3469{
3470	int error;
3471	int len;
3472	struct inode *inode;
3473	struct folio *folio;
3474
3475	len = strlen(symname) + 1;
3476	if (len > PAGE_SIZE)
3477		return -ENAMETOOLONG;
3478
3479	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3480				VM_NORESERVE);
3481	if (IS_ERR(inode))
3482		return PTR_ERR(inode);
3483
3484	error = security_inode_init_security(inode, dir, &dentry->d_name,
3485					     shmem_initxattrs, NULL);
3486	if (error && error != -EOPNOTSUPP)
3487		goto out_iput;
3488
3489	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3490	if (error)
3491		goto out_iput;
3492
3493	inode->i_size = len-1;
3494	if (len <= SHORT_SYMLINK_LEN) {
3495		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3496		if (!inode->i_link) {
3497			error = -ENOMEM;
3498			goto out_remove_offset;
3499		}
3500		inode->i_op = &shmem_short_symlink_operations;
3501	} else {
3502		inode_nohighmem(inode);
3503		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
3504		if (error)
3505			goto out_remove_offset;
3506		inode->i_mapping->a_ops = &shmem_aops;
3507		inode->i_op = &shmem_symlink_inode_operations;
3508		memcpy(folio_address(folio), symname, len);
3509		folio_mark_uptodate(folio);
3510		folio_mark_dirty(folio);
3511		folio_unlock(folio);
3512		folio_put(folio);
3513	}
3514	dir->i_size += BOGO_DIRENT_SIZE;
3515	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3516	inode_inc_iversion(dir);
3517	d_instantiate(dentry, inode);
3518	dget(dentry);
3519	return 0;
3520
3521out_remove_offset:
3522	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3523out_iput:
3524	iput(inode);
3525	return error;
3526}
3527
3528static void shmem_put_link(void *arg)
3529{
3530	folio_mark_accessed(arg);
3531	folio_put(arg);
3532}
3533
3534static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3535				  struct delayed_call *done)
3536{
3537	struct folio *folio = NULL;
3538	int error;
3539
3540	if (!dentry) {
3541		folio = filemap_get_folio(inode->i_mapping, 0);
3542		if (IS_ERR(folio))
3543			return ERR_PTR(-ECHILD);
3544		if (PageHWPoison(folio_page(folio, 0)) ||
3545		    !folio_test_uptodate(folio)) {
3546			folio_put(folio);
3547			return ERR_PTR(-ECHILD);
3548		}
3549	} else {
3550		error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3551		if (error)
3552			return ERR_PTR(error);
3553		if (!folio)
3554			return ERR_PTR(-ECHILD);
3555		if (PageHWPoison(folio_page(folio, 0))) {
3556			folio_unlock(folio);
3557			folio_put(folio);
3558			return ERR_PTR(-ECHILD);
3559		}
3560		folio_unlock(folio);
3561	}
3562	set_delayed_call(done, shmem_put_link, folio);
3563	return folio_address(folio);
3564}
3565
3566#ifdef CONFIG_TMPFS_XATTR
3567
3568static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3569{
3570	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3571
3572	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3573
3574	return 0;
3575}
3576
3577static int shmem_fileattr_set(struct mnt_idmap *idmap,
3578			      struct dentry *dentry, struct fileattr *fa)
3579{
3580	struct inode *inode = d_inode(dentry);
3581	struct shmem_inode_info *info = SHMEM_I(inode);
3582
3583	if (fileattr_has_fsx(fa))
3584		return -EOPNOTSUPP;
3585	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3586		return -EOPNOTSUPP;
3587
3588	info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3589		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
3590
3591	shmem_set_inode_flags(inode, info->fsflags);
3592	inode_set_ctime_current(inode);
3593	inode_inc_iversion(inode);
3594	return 0;
3595}
3596
3597/*
3598 * Superblocks without xattr inode operations may get some security.* xattr
3599 * support from the LSM "for free". As soon as we have any other xattrs
3600 * like ACLs, we also need to implement the security.* handlers at
3601 * filesystem level, though.
3602 */
3603
3604/*
3605 * Callback for security_inode_init_security() for acquiring xattrs.
3606 */
3607static int shmem_initxattrs(struct inode *inode,
3608			    const struct xattr *xattr_array, void *fs_info)
3609{
3610	struct shmem_inode_info *info = SHMEM_I(inode);
3611	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3612	const struct xattr *xattr;
3613	struct simple_xattr *new_xattr;
3614	size_t ispace = 0;
3615	size_t len;
3616
3617	if (sbinfo->max_inodes) {
3618		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3619			ispace += simple_xattr_space(xattr->name,
3620				xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
3621		}
3622		if (ispace) {
3623			raw_spin_lock(&sbinfo->stat_lock);
3624			if (sbinfo->free_ispace < ispace)
3625				ispace = 0;
3626			else
3627				sbinfo->free_ispace -= ispace;
3628			raw_spin_unlock(&sbinfo->stat_lock);
3629			if (!ispace)
3630				return -ENOSPC;
3631		}
3632	}
3633
3634	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3635		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3636		if (!new_xattr)
3637			break;
3638
3639		len = strlen(xattr->name) + 1;
3640		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3641					  GFP_KERNEL_ACCOUNT);
3642		if (!new_xattr->name) {
3643			kvfree(new_xattr);
3644			break;
3645		}
3646
3647		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3648		       XATTR_SECURITY_PREFIX_LEN);
3649		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3650		       xattr->name, len);
3651
3652		simple_xattr_add(&info->xattrs, new_xattr);
3653	}
3654
3655	if (xattr->name != NULL) {
3656		if (ispace) {
3657			raw_spin_lock(&sbinfo->stat_lock);
3658			sbinfo->free_ispace += ispace;
3659			raw_spin_unlock(&sbinfo->stat_lock);
3660		}
3661		simple_xattrs_free(&info->xattrs, NULL);
3662		return -ENOMEM;
3663	}
3664
3665	return 0;
3666}
3667
3668static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3669				   struct dentry *unused, struct inode *inode,
3670				   const char *name, void *buffer, size_t size)
3671{
3672	struct shmem_inode_info *info = SHMEM_I(inode);
3673
3674	name = xattr_full_name(handler, name);
3675	return simple_xattr_get(&info->xattrs, name, buffer, size);
3676}
3677
3678static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3679				   struct mnt_idmap *idmap,
3680				   struct dentry *unused, struct inode *inode,
3681				   const char *name, const void *value,
3682				   size_t size, int flags)
3683{
3684	struct shmem_inode_info *info = SHMEM_I(inode);
3685	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3686	struct simple_xattr *old_xattr;
3687	size_t ispace = 0;
3688
3689	name = xattr_full_name(handler, name);
3690	if (value && sbinfo->max_inodes) {
3691		ispace = simple_xattr_space(name, size);
3692		raw_spin_lock(&sbinfo->stat_lock);
3693		if (sbinfo->free_ispace < ispace)
3694			ispace = 0;
3695		else
3696			sbinfo->free_ispace -= ispace;
3697		raw_spin_unlock(&sbinfo->stat_lock);
3698		if (!ispace)
3699			return -ENOSPC;
3700	}
3701
3702	old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
3703	if (!IS_ERR(old_xattr)) {
3704		ispace = 0;
3705		if (old_xattr && sbinfo->max_inodes)
3706			ispace = simple_xattr_space(old_xattr->name,
3707						    old_xattr->size);
3708		simple_xattr_free(old_xattr);
3709		old_xattr = NULL;
3710		inode_set_ctime_current(inode);
3711		inode_inc_iversion(inode);
3712	}
3713	if (ispace) {
3714		raw_spin_lock(&sbinfo->stat_lock);
3715		sbinfo->free_ispace += ispace;
3716		raw_spin_unlock(&sbinfo->stat_lock);
3717	}
3718	return PTR_ERR(old_xattr);
3719}
3720
3721static const struct xattr_handler shmem_security_xattr_handler = {
3722	.prefix = XATTR_SECURITY_PREFIX,
3723	.get = shmem_xattr_handler_get,
3724	.set = shmem_xattr_handler_set,
3725};
3726
3727static const struct xattr_handler shmem_trusted_xattr_handler = {
3728	.prefix = XATTR_TRUSTED_PREFIX,
3729	.get = shmem_xattr_handler_get,
3730	.set = shmem_xattr_handler_set,
3731};
3732
3733static const struct xattr_handler shmem_user_xattr_handler = {
3734	.prefix = XATTR_USER_PREFIX,
3735	.get = shmem_xattr_handler_get,
3736	.set = shmem_xattr_handler_set,
3737};
3738
3739static const struct xattr_handler * const shmem_xattr_handlers[] = {
3740	&shmem_security_xattr_handler,
3741	&shmem_trusted_xattr_handler,
3742	&shmem_user_xattr_handler,
3743	NULL
3744};
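
/*
 * Illustrative userspace exercise of the three handlers above
 * (mount point and attribute value are hypothetical):
 *
 *	setfattr -n user.comment -v "scratch" /mnt/tmp/file
 *	getfattr -n user.comment /mnt/tmp/file
 */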
3745
3746static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3747{
3748	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3749	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3750}
3751#endif /* CONFIG_TMPFS_XATTR */
3752
3753static const struct inode_operations shmem_short_symlink_operations = {
3754	.getattr	= shmem_getattr,
3755	.setattr	= shmem_setattr,
3756	.get_link	= simple_get_link,
3757#ifdef CONFIG_TMPFS_XATTR
3758	.listxattr	= shmem_listxattr,
3759#endif
3760};
3761
3762static const struct inode_operations shmem_symlink_inode_operations = {
3763	.getattr	= shmem_getattr,
3764	.setattr	= shmem_setattr,
3765	.get_link	= shmem_get_link,
3766#ifdef CONFIG_TMPFS_XATTR
3767	.listxattr	= shmem_listxattr,
3768#endif
3769};
3770
3771static struct dentry *shmem_get_parent(struct dentry *child)
3772{
3773	return ERR_PTR(-ESTALE);
3774}
3775
3776static int shmem_match(struct inode *ino, void *vfh)
3777{
3778	__u32 *fh = vfh;
3779	__u64 inum = fh[2];
3780	inum = (inum << 32) | fh[1];
3781	return ino->i_ino == inum && fh[0] == ino->i_generation;
3782}
3783
3784/* Find any alias of inode, but prefer a hashed alias */
3785static struct dentry *shmem_find_alias(struct inode *inode)
3786{
3787	struct dentry *alias = d_find_alias(inode);
3788
3789	return alias ?: d_find_any_alias(inode);
3790}
3791
3792static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3793		struct fid *fid, int fh_len, int fh_type)
3794{
3795	struct inode *inode;
3796	struct dentry *dentry = NULL;
3797	u64 inum;
3798
3799	if (fh_len < 3)
3800		return NULL;
3801
3802	inum = fid->raw[2];
3803	inum = (inum << 32) | fid->raw[1];
3804
3805	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3806			shmem_match, fid->raw);
3807	if (inode) {
3808		dentry = shmem_find_alias(inode);
3809		iput(inode);
3810	}
3811
3812	return dentry;
3813}
3814
3815static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3816				struct inode *parent)
3817{
3818	if (*len < 3) {
3819		*len = 3;
3820		return FILEID_INVALID;
3821	}
3822
3823	if (inode_unhashed(inode)) {
3824		/* Unfortunately insert_inode_hash is not idempotent,
3825		 * so as we hash inodes here rather than at creation
3826		 * time, we need a lock to ensure we only try
3827		 * to do it once
3828		 */
3829		static DEFINE_SPINLOCK(lock);
3830		spin_lock(&lock);
3831		if (inode_unhashed(inode))
3832			__insert_inode_hash(inode,
3833					    inode->i_ino + inode->i_generation);
3834		spin_unlock(&lock);
3835	}
3836
3837	fh[0] = inode->i_generation;
3838	fh[1] = inode->i_ino;
3839	fh[2] = ((__u64)inode->i_ino) >> 32;
3840
3841	*len = 3;
3842	return 1;
3843}
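
/*
 * Sketch of the 3-word handle produced above, matching what
 * shmem_match() and shmem_fh_to_dentry() decode:
 *
 *	fh[0] = i_generation
 *	fh[1] = low 32 bits of i_ino
 *	fh[2] = high 32 bits of i_ino
 */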
3844
3845static const struct export_operations shmem_export_ops = {
3846	.get_parent     = shmem_get_parent,
3847	.encode_fh      = shmem_encode_fh,
3848	.fh_to_dentry	= shmem_fh_to_dentry,
3849};
3850
3851enum shmem_param {
3852	Opt_gid,
3853	Opt_huge,
3854	Opt_mode,
3855	Opt_mpol,
3856	Opt_nr_blocks,
3857	Opt_nr_inodes,
3858	Opt_size,
3859	Opt_uid,
3860	Opt_inode32,
3861	Opt_inode64,
3862	Opt_noswap,
3863	Opt_quota,
3864	Opt_usrquota,
3865	Opt_grpquota,
3866	Opt_usrquota_block_hardlimit,
3867	Opt_usrquota_inode_hardlimit,
3868	Opt_grpquota_block_hardlimit,
3869	Opt_grpquota_inode_hardlimit,
3870};
3871
3872static const struct constant_table shmem_param_enums_huge[] = {
3873	{"never",	SHMEM_HUGE_NEVER },
3874	{"always",	SHMEM_HUGE_ALWAYS },
3875	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
3876	{"advise",	SHMEM_HUGE_ADVISE },
3877	{}
3878};
3879
3880const struct fs_parameter_spec shmem_fs_parameters[] = {
3881	fsparam_u32   ("gid",		Opt_gid),
3882	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3883	fsparam_u32oct("mode",		Opt_mode),
3884	fsparam_string("mpol",		Opt_mpol),
3885	fsparam_string("nr_blocks",	Opt_nr_blocks),
3886	fsparam_string("nr_inodes",	Opt_nr_inodes),
3887	fsparam_string("size",		Opt_size),
3888	fsparam_u32   ("uid",		Opt_uid),
3889	fsparam_flag  ("inode32",	Opt_inode32),
3890	fsparam_flag  ("inode64",	Opt_inode64),
3891	fsparam_flag  ("noswap",	Opt_noswap),
3892#ifdef CONFIG_TMPFS_QUOTA
3893	fsparam_flag  ("quota",		Opt_quota),
3894	fsparam_flag  ("usrquota",	Opt_usrquota),
3895	fsparam_flag  ("grpquota",	Opt_grpquota),
3896	fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
3897	fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
3898	fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
3899	fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
3900#endif
3901	{}
3902};
3903
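/*
 * Example mount using the parameter table above (values are
 * illustrative):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,huge=within_size \
 *		tmpfs /mnt/tmp
 */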
3904static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3905{
3906	struct shmem_options *ctx = fc->fs_private;
3907	struct fs_parse_result result;
3908	unsigned long long size;
3909	char *rest;
3910	int opt;
3911	kuid_t kuid;
3912	kgid_t kgid;
3913
3914	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3915	if (opt < 0)
3916		return opt;
3917
3918	switch (opt) {
3919	case Opt_size:
3920		size = memparse(param->string, &rest);
3921		if (*rest == '%') {
3922			size <<= PAGE_SHIFT;
3923			size *= totalram_pages();
3924			do_div(size, 100);
3925			rest++;
3926		}
3927		if (*rest)
3928			goto bad_value;
3929		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3930		ctx->seen |= SHMEM_SEEN_BLOCKS;
3931		break;
3932	case Opt_nr_blocks:
3933		ctx->blocks = memparse(param->string, &rest);
3934		if (*rest || ctx->blocks > LONG_MAX)
3935			goto bad_value;
3936		ctx->seen |= SHMEM_SEEN_BLOCKS;
3937		break;
3938	case Opt_nr_inodes:
3939		ctx->inodes = memparse(param->string, &rest);
3940		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
3941			goto bad_value;
3942		ctx->seen |= SHMEM_SEEN_INODES;
3943		break;
3944	case Opt_mode:
3945		ctx->mode = result.uint_32 & 07777;
3946		break;
3947	case Opt_uid:
3948		kuid = make_kuid(current_user_ns(), result.uint_32);
3949		if (!uid_valid(kuid))
3950			goto bad_value;
3951
3952		/*
3953		 * The requested uid must be representable in the
3954		 * filesystem's idmapping.
3955		 */
3956		if (!kuid_has_mapping(fc->user_ns, kuid))
3957			goto bad_value;
3958
3959		ctx->uid = kuid;
3960		break;
3961	case Opt_gid:
3962		kgid = make_kgid(current_user_ns(), result.uint_32);
3963		if (!gid_valid(kgid))
3964			goto bad_value;
3965
3966		/*
3967		 * The requested gid must be representable in the
3968		 * filesystem's idmapping.
3969		 */
3970		if (!kgid_has_mapping(fc->user_ns, kgid))
3971			goto bad_value;
3972
3973		ctx->gid = kgid;
3974		break;
3975	case Opt_huge:
3976		ctx->huge = result.uint_32;
3977		if (ctx->huge != SHMEM_HUGE_NEVER &&
3978		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3979		      has_transparent_hugepage()))
3980			goto unsupported_parameter;
3981		ctx->seen |= SHMEM_SEEN_HUGE;
3982		break;
3983	case Opt_mpol:
3984		if (IS_ENABLED(CONFIG_NUMA)) {
3985			mpol_put(ctx->mpol);
3986			ctx->mpol = NULL;
3987			if (mpol_parse_str(param->string, &ctx->mpol))
3988				goto bad_value;
3989			break;
3990		}
3991		goto unsupported_parameter;
3992	case Opt_inode32:
3993		ctx->full_inums = false;
3994		ctx->seen |= SHMEM_SEEN_INUMS;
3995		break;
3996	case Opt_inode64:
3997		if (sizeof(ino_t) < 8) {
3998			return invalfc(fc,
3999				       "Cannot use inode64 with <64bit inums in kernel\n");
4000		}
4001		ctx->full_inums = true;
4002		ctx->seen |= SHMEM_SEEN_INUMS;
4003		break;
4004	case Opt_noswap:
4005		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4006			return invalfc(fc,
4007				       "Turning off swap in unprivileged tmpfs mounts unsupported");
4008		}
4009		ctx->noswap = true;
4010		ctx->seen |= SHMEM_SEEN_NOSWAP;
4011		break;
4012	case Opt_quota:
4013		if (fc->user_ns != &init_user_ns)
4014			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4015		ctx->seen |= SHMEM_SEEN_QUOTA;
4016		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4017		break;
4018	case Opt_usrquota:
4019		if (fc->user_ns != &init_user_ns)
4020			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4021		ctx->seen |= SHMEM_SEEN_QUOTA;
4022		ctx->quota_types |= QTYPE_MASK_USR;
4023		break;
4024	case Opt_grpquota:
4025		if (fc->user_ns != &init_user_ns)
4026			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4027		ctx->seen |= SHMEM_SEEN_QUOTA;
4028		ctx->quota_types |= QTYPE_MASK_GRP;
4029		break;
4030	case Opt_usrquota_block_hardlimit:
4031		size = memparse(param->string, &rest);
4032		if (*rest || !size)
4033			goto bad_value;
4034		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4035			return invalfc(fc,
4036				       "User quota block hardlimit too large.");
4037		ctx->qlimits.usrquota_bhardlimit = size;
4038		break;
4039	case Opt_grpquota_block_hardlimit:
4040		size = memparse(param->string, &rest);
4041		if (*rest || !size)
4042			goto bad_value;
4043		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4044			return invalfc(fc,
4045				       "Group quota block hardlimit too large.");
4046		ctx->qlimits.grpquota_bhardlimit = size;
4047		break;
4048	case Opt_usrquota_inode_hardlimit:
4049		size = memparse(param->string, &rest);
4050		if (*rest || !size)
4051			goto bad_value;
4052		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4053			return invalfc(fc,
4054				       "User quota inode hardlimit too large.");
4055		ctx->qlimits.usrquota_ihardlimit = size;
4056		break;
4057	case Opt_grpquota_inode_hardlimit:
4058		size = memparse(param->string, &rest);
4059		if (*rest || !size)
4060			goto bad_value;
4061		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4062			return invalfc(fc,
4063				       "Group quota inode hardlimit too large.");
4064		ctx->qlimits.grpquota_ihardlimit = size;
4065		break;
4066	}
4067	return 0;
4068
4069unsupported_parameter:
4070	return invalfc(fc, "Unsupported parameter '%s'", param->key);
4071bad_value:
4072	return invalfc(fc, "Bad value for '%s'", param->key);
4073}
4074
4075static int shmem_parse_options(struct fs_context *fc, void *data)
4076{
4077	char *options = data;
4078
4079	if (options) {
4080		int err = security_sb_eat_lsm_opts(options, &fc->security);
4081		if (err)
4082			return err;
4083	}
4084
4085	while (options != NULL) {
4086		char *this_char = options;
4087		for (;;) {
4088			/*
4089			 * NUL-terminate this option: unfortunately,
4090			 * mount options form a comma-separated list,
4091			 * but mpol's nodelist may also contain commas.
4092			 */
4093			options = strchr(options, ',');
4094			if (options == NULL)
4095				break;
4096			options++;
4097			if (!isdigit(*options)) {
4098				options[-1] = '\0';
4099				break;
4100			}
4101		}
4102		if (*this_char) {
4103			char *value = strchr(this_char, '=');
4104			size_t len = 0;
4105			int err;
4106
4107			if (value) {
4108				*value++ = '\0';
4109				len = strlen(value);
4110			}
4111			err = vfs_parse_fs_string(fc, this_char, value, len);
4112			if (err < 0)
4113				return err;
4114		}
4115	}
4116	return 0;
4117}
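
/*
 * Illustrative effect of the scan above: in a monolithic string such as
 *
 *	size=1g,mpol=bind:0-3,5,mode=1777
 *
 * the comma before "5" is followed by a digit, so it is kept as part of
 * mpol's nodelist, while the commas before "mpol" and "mode" act as
 * option separators.
 */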
4118
4119/*
4120 * Reconfigure a shmem filesystem.
4121 */
4122static int shmem_reconfigure(struct fs_context *fc)
4123{
4124	struct shmem_options *ctx = fc->fs_private;
4125	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4126	unsigned long used_isp;
4127	struct mempolicy *mpol = NULL;
4128	const char *err;
4129
4130	raw_spin_lock(&sbinfo->stat_lock);
4131	used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4132
4133	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4134		if (!sbinfo->max_blocks) {
4135			err = "Cannot retroactively limit size";
4136			goto out;
4137		}
4138		if (percpu_counter_compare(&sbinfo->used_blocks,
4139					   ctx->blocks) > 0) {
4140			err = "Too small a size for current use";
4141			goto out;
4142		}
4143	}
4144	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4145		if (!sbinfo->max_inodes) {
4146			err = "Cannot retroactively limit inodes";
4147			goto out;
4148		}
4149		if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4150			err = "Too few inodes for current use";
4151			goto out;
4152		}
4153	}
4154
4155	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4156	    sbinfo->next_ino > UINT_MAX) {
4157		err = "Current inum too high to switch to 32-bit inums";
4158		goto out;
4159	}
4160	if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4161		err = "Cannot disable swap on remount";
4162		goto out;
4163	}
4164	if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4165		err = "Cannot enable swap on remount if it was disabled on first mount";
4166		goto out;
4167	}
4168
4169	if (ctx->seen & SHMEM_SEEN_QUOTA &&
4170	    !sb_any_quota_loaded(fc->root->d_sb)) {
4171		err = "Cannot enable quota on remount";
4172		goto out;
4173	}
4174
4175#ifdef CONFIG_TMPFS_QUOTA
4176#define CHANGED_LIMIT(name)						\
4177	(ctx->qlimits.name## hardlimit &&				\
4178	(ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4179
4180	if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4181	    CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4182		err = "Cannot change global quota limit on remount";
4183		goto out;
4184	}
4185#endif /* CONFIG_TMPFS_QUOTA */
4186
4187	if (ctx->seen & SHMEM_SEEN_HUGE)
4188		sbinfo->huge = ctx->huge;
4189	if (ctx->seen & SHMEM_SEEN_INUMS)
4190		sbinfo->full_inums = ctx->full_inums;
4191	if (ctx->seen & SHMEM_SEEN_BLOCKS)
4192		sbinfo->max_blocks  = ctx->blocks;
4193	if (ctx->seen & SHMEM_SEEN_INODES) {
4194		sbinfo->max_inodes  = ctx->inodes;
4195		sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4196	}
4197
4198	/*
4199	 * Preserve previous mempolicy unless mpol remount option was specified.
4200	 */
4201	if (ctx->mpol) {
4202		mpol = sbinfo->mpol;
4203		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
4204		ctx->mpol = NULL;
4205	}
4206
4207	if (ctx->noswap)
4208		sbinfo->noswap = true;
4209
4210	raw_spin_unlock(&sbinfo->stat_lock);
4211	mpol_put(mpol);
4212	return 0;
4213out:
4214	raw_spin_unlock(&sbinfo->stat_lock);
4215	return invalfc(fc, "%s", err);
4216}
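
/*
 * Illustrative remount exercising the checks above (mount point is
 * hypothetical):
 *
 *	mount -o remount,size=2g,nr_inodes=2m /mnt/tmp
 *
 * This fails if either new limit is below current usage, or if it
 * would retroactively limit a previously unlimited mount.
 */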
4217
4218static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4219{
4220	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4221	struct mempolicy *mpol;
4222
4223	if (sbinfo->max_blocks != shmem_default_max_blocks())
4224		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4225	if (sbinfo->max_inodes != shmem_default_max_inodes())
4226		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4227	if (sbinfo->mode != (0777 | S_ISVTX))
4228		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4229	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4230		seq_printf(seq, ",uid=%u",
4231				from_kuid_munged(&init_user_ns, sbinfo->uid));
4232	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4233		seq_printf(seq, ",gid=%u",
4234				from_kgid_munged(&init_user_ns, sbinfo->gid));
4235
4236	/*
4237	 * Showing inode{64,32} might be useful even if it's the system default,
4238	 * since then people don't have to resort to checking both here and
4239	 * /proc/config.gz (which may not even exist if IKCONFIG_PROC isn't
4240	 * enabled) to confirm 64-bit inums were successfully applied.
4241	 *
4242	 * We hide it when inode64 isn't the default and we are using 32-bit
4243	 * inodes, since that probably just means the feature isn't even under
4244	 * consideration.
4245	 *
4246	 * As such:
4247	 *
4248	 *                     +-----------------+-----------------+
4249	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
4250	 *  +------------------+-----------------+-----------------+
4251	 *  | full_inums=true  | show            | show            |
4252	 *  | full_inums=false | show            | hide            |
4253	 *  +------------------+-----------------+-----------------+
4254	 *
4255	 */
4256	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4257		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4258#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4259	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4260	if (sbinfo->huge)
4261		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4262#endif
4263	mpol = shmem_get_sbmpol(sbinfo);
4264	shmem_show_mpol(seq, mpol);
4265	mpol_put(mpol);
4266	if (sbinfo->noswap)
4267		seq_printf(seq, ",noswap");
4268	return 0;
4269}
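
/*
 * Example /proc/mounts line built with the help of the function above
 * (values are illustrative):
 *
 *	tmpfs /mnt/tmp tmpfs rw,relatime,size=2097152k,nr_inodes=2097152,huge=within_size 0 0
 */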
4270
4271#endif /* CONFIG_TMPFS */
4272
4273static void shmem_put_super(struct super_block *sb)
4274{
4275	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4276
4277#ifdef CONFIG_TMPFS_QUOTA
4278	shmem_disable_quotas(sb);
4279#endif
4280	free_percpu(sbinfo->ino_batch);
4281	percpu_counter_destroy(&sbinfo->used_blocks);
4282	mpol_put(sbinfo->mpol);
4283	kfree(sbinfo);
4284	sb->s_fs_info = NULL;
4285}
4286
4287static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4288{
4289	struct shmem_options *ctx = fc->fs_private;
4290	struct inode *inode;
4291	struct shmem_sb_info *sbinfo;
4292	int error = -ENOMEM;
4293
4294	/* Round up to L1_CACHE_BYTES to resist false sharing */
4295	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4296				L1_CACHE_BYTES), GFP_KERNEL);
4297	if (!sbinfo)
4298		return error;
4299
4300	sb->s_fs_info = sbinfo;
4301
4302#ifdef CONFIG_TMPFS
4303	/*
4304	 * By default we only allow half of the physical RAM per
4305	 * tmpfs instance, limiting inodes to one per page of lowmem;
4306	 * but the internal instance is left unlimited.
4307	 */
4308	if (!(sb->s_flags & SB_KERNMOUNT)) {
4309		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4310			ctx->blocks = shmem_default_max_blocks();
4311		if (!(ctx->seen & SHMEM_SEEN_INODES))
4312			ctx->inodes = shmem_default_max_inodes();
4313		if (!(ctx->seen & SHMEM_SEEN_INUMS))
4314			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4315		sbinfo->noswap = ctx->noswap;
4316	} else {
4317		sb->s_flags |= SB_NOUSER;
4318	}
4319	sb->s_export_op = &shmem_export_ops;
4320	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4321#else
4322	sb->s_flags |= SB_NOUSER;
4323#endif
4324	sbinfo->max_blocks = ctx->blocks;
4325	sbinfo->max_inodes = ctx->inodes;
4326	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4327	if (sb->s_flags & SB_KERNMOUNT) {
4328		sbinfo->ino_batch = alloc_percpu(ino_t);
4329		if (!sbinfo->ino_batch)
4330			goto failed;
4331	}
4332	sbinfo->uid = ctx->uid;
4333	sbinfo->gid = ctx->gid;
4334	sbinfo->full_inums = ctx->full_inums;
4335	sbinfo->mode = ctx->mode;
4336	sbinfo->huge = ctx->huge;
4337	sbinfo->mpol = ctx->mpol;
4338	ctx->mpol = NULL;
4339
4340	raw_spin_lock_init(&sbinfo->stat_lock);
4341	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4342		goto failed;
4343	spin_lock_init(&sbinfo->shrinklist_lock);
4344	INIT_LIST_HEAD(&sbinfo->shrinklist);
4345
4346	sb->s_maxbytes = MAX_LFS_FILESIZE;
4347	sb->s_blocksize = PAGE_SIZE;
4348	sb->s_blocksize_bits = PAGE_SHIFT;
4349	sb->s_magic = TMPFS_MAGIC;
4350	sb->s_op = &shmem_ops;
4351	sb->s_time_gran = 1;
4352#ifdef CONFIG_TMPFS_XATTR
4353	sb->s_xattr = shmem_xattr_handlers;
4354#endif
4355#ifdef CONFIG_TMPFS_POSIX_ACL
4356	sb->s_flags |= SB_POSIXACL;
4357#endif
4358	uuid_gen(&sb->s_uuid);
4359
4360#ifdef CONFIG_TMPFS_QUOTA
4361	if (ctx->seen & SHMEM_SEEN_QUOTA) {
4362		sb->dq_op = &shmem_quota_operations;
4363		sb->s_qcop = &dquot_quotactl_sysfile_ops;
4364		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4365
4366		/* Copy the default limits from ctx into sbinfo */
4367		memcpy(&sbinfo->qlimits, &ctx->qlimits,
4368		       sizeof(struct shmem_quota_limits));
4369
4370		if (shmem_enable_quotas(sb, ctx->quota_types))
4371			goto failed;
4372	}
4373#endif /* CONFIG_TMPFS_QUOTA */
4374
4375	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4376				S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
4377	if (IS_ERR(inode)) {
4378		error = PTR_ERR(inode);
4379		goto failed;
4380	}
4381	inode->i_uid = sbinfo->uid;
4382	inode->i_gid = sbinfo->gid;
4383	sb->s_root = d_make_root(inode);
4384	if (!sb->s_root)
4385		goto failed;
4386	return 0;
4387
4388failed:
4389	shmem_put_super(sb);
4390	return error;
4391}
4392
4393static int shmem_get_tree(struct fs_context *fc)
4394{
4395	return get_tree_nodev(fc, shmem_fill_super);
4396}
4397
4398static void shmem_free_fc(struct fs_context *fc)
4399{
4400	struct shmem_options *ctx = fc->fs_private;
4401
4402	if (ctx) {
4403		mpol_put(ctx->mpol);
4404		kfree(ctx);
4405	}
4406}
4407
4408static const struct fs_context_operations shmem_fs_context_ops = {
4409	.free			= shmem_free_fc,
4410	.get_tree		= shmem_get_tree,
4411#ifdef CONFIG_TMPFS
4412	.parse_monolithic	= shmem_parse_options,
4413	.parse_param		= shmem_parse_one,
4414	.reconfigure		= shmem_reconfigure,
4415#endif
4416};
4417
4418static struct kmem_cache *shmem_inode_cachep __ro_after_init;
4419
4420static struct inode *shmem_alloc_inode(struct super_block *sb)
4421{
4422	struct shmem_inode_info *info;
4423	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4424	if (!info)
4425		return NULL;
4426	return &info->vfs_inode;
4427}
4428
4429static void shmem_free_in_core_inode(struct inode *inode)
4430{
4431	if (S_ISLNK(inode->i_mode))
4432		kfree(inode->i_link);
4433	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4434}
4435
4436static void shmem_destroy_inode(struct inode *inode)
4437{
4438	if (S_ISREG(inode->i_mode))
4439		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4440	if (S_ISDIR(inode->i_mode))
4441		simple_offset_destroy(shmem_get_offset_ctx(inode));
4442}
4443
4444static void shmem_init_inode(void *foo)
4445{
4446	struct shmem_inode_info *info = foo;
4447	inode_init_once(&info->vfs_inode);
4448}
4449
4450static void __init shmem_init_inodecache(void)
4451{
4452	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
4453				sizeof(struct shmem_inode_info),
4454				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
4455}
4456
4457static void __init shmem_destroy_inodecache(void)
4458{
4459	kmem_cache_destroy(shmem_inode_cachep);
4460}
4461
4462/* Keep the page in page cache instead of truncating it */
4463static int shmem_error_remove_folio(struct address_space *mapping,
4464				   struct folio *folio)
4465{
4466	return 0;
4467}
4468
4469const struct address_space_operations shmem_aops = {
4470	.writepage	= shmem_writepage,
4471	.dirty_folio	= noop_dirty_folio,
4472#ifdef CONFIG_TMPFS
4473	.write_begin	= shmem_write_begin,
4474	.write_end	= shmem_write_end,
4475#endif
4476#ifdef CONFIG_MIGRATION
4477	.migrate_folio	= migrate_folio,
4478#endif
4479	.error_remove_folio = shmem_error_remove_folio,
4480};
4481EXPORT_SYMBOL(shmem_aops);
4482
4483static const struct file_operations shmem_file_operations = {
4484	.mmap		= shmem_mmap,
4485	.open		= shmem_file_open,
4486	.get_unmapped_area = shmem_get_unmapped_area,
4487#ifdef CONFIG_TMPFS
4488	.llseek		= shmem_file_llseek,
4489	.read_iter	= shmem_file_read_iter,
4490	.write_iter	= shmem_file_write_iter,
4491	.fsync		= noop_fsync,
4492	.splice_read	= shmem_file_splice_read,
4493	.splice_write	= iter_file_splice_write,
4494	.fallocate	= shmem_fallocate,
4495#endif
4496};
4497
4498static const struct inode_operations shmem_inode_operations = {
4499	.getattr	= shmem_getattr,
4500	.setattr	= shmem_setattr,
4501#ifdef CONFIG_TMPFS_XATTR
4502	.listxattr	= shmem_listxattr,
4503	.set_acl	= simple_set_acl,
4504	.fileattr_get	= shmem_fileattr_get,
4505	.fileattr_set	= shmem_fileattr_set,
4506#endif
4507};
4508
4509static const struct inode_operations shmem_dir_inode_operations = {
4510#ifdef CONFIG_TMPFS
4511	.getattr	= shmem_getattr,
4512	.create		= shmem_create,
4513	.lookup		= simple_lookup,
4514	.link		= shmem_link,
4515	.unlink		= shmem_unlink,
4516	.symlink	= shmem_symlink,
4517	.mkdir		= shmem_mkdir,
4518	.rmdir		= shmem_rmdir,
4519	.mknod		= shmem_mknod,
4520	.rename		= shmem_rename2,
4521	.tmpfile	= shmem_tmpfile,
4522	.get_offset_ctx	= shmem_get_offset_ctx,
4523#endif
4524#ifdef CONFIG_TMPFS_XATTR
4525	.listxattr	= shmem_listxattr,
4526	.fileattr_get	= shmem_fileattr_get,
4527	.fileattr_set	= shmem_fileattr_set,
4528#endif
4529#ifdef CONFIG_TMPFS_POSIX_ACL
4530	.setattr	= shmem_setattr,
4531	.set_acl	= simple_set_acl,
4532#endif
4533};
4534
4535static const struct inode_operations shmem_special_inode_operations = {
4536	.getattr	= shmem_getattr,
4537#ifdef CONFIG_TMPFS_XATTR
4538	.listxattr	= shmem_listxattr,
4539#endif
4540#ifdef CONFIG_TMPFS_POSIX_ACL
4541	.setattr	= shmem_setattr,
4542	.set_acl	= simple_set_acl,
4543#endif
4544};
4545
4546static const struct super_operations shmem_ops = {
4547	.alloc_inode	= shmem_alloc_inode,
4548	.free_inode	= shmem_free_in_core_inode,
4549	.destroy_inode	= shmem_destroy_inode,
4550#ifdef CONFIG_TMPFS
4551	.statfs		= shmem_statfs,
4552	.show_options	= shmem_show_options,
4553#endif
4554#ifdef CONFIG_TMPFS_QUOTA
4555	.get_dquots	= shmem_get_dquots,
4556#endif
4557	.evict_inode	= shmem_evict_inode,
4558	.drop_inode	= generic_delete_inode,
4559	.put_super	= shmem_put_super,
4560#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4561	.nr_cached_objects	= shmem_unused_huge_count,
4562	.free_cached_objects	= shmem_unused_huge_scan,
4563#endif
4564};
4565
4566static const struct vm_operations_struct shmem_vm_ops = {
4567	.fault		= shmem_fault,
4568	.map_pages	= filemap_map_pages,
4569#ifdef CONFIG_NUMA
4570	.set_policy     = shmem_set_policy,
4571	.get_policy     = shmem_get_policy,
4572#endif
4573};
4574
4575static const struct vm_operations_struct shmem_anon_vm_ops = {
4576	.fault		= shmem_fault,
4577	.map_pages	= filemap_map_pages,
4578#ifdef CONFIG_NUMA
4579	.set_policy     = shmem_set_policy,
4580	.get_policy     = shmem_get_policy,
4581#endif
4582};
4583
4584int shmem_init_fs_context(struct fs_context *fc)
4585{
4586	struct shmem_options *ctx;
4587
4588	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4589	if (!ctx)
4590		return -ENOMEM;
4591
4592	ctx->mode = 0777 | S_ISVTX;
4593	ctx->uid = current_fsuid();
4594	ctx->gid = current_fsgid();
4595
4596	fc->fs_private = ctx;
4597	fc->ops = &shmem_fs_context_ops;
4598	return 0;
4599}
4600
4601static struct file_system_type shmem_fs_type = {
4602	.owner		= THIS_MODULE,
4603	.name		= "tmpfs",
4604	.init_fs_context = shmem_init_fs_context,
4605#ifdef CONFIG_TMPFS
4606	.parameters	= shmem_fs_parameters,
4607#endif
4608	.kill_sb	= kill_litter_super,
4609	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
4610};
4611
4612void __init shmem_init(void)
4613{
4614	int error;
4615
4616	shmem_init_inodecache();
4617
4618#ifdef CONFIG_TMPFS_QUOTA
4619	error = register_quota_format(&shmem_quota_format);
4620	if (error < 0) {
4621		pr_err("Could not register quota format\n");
4622		goto out3;
4623	}
4624#endif
4625
4626	error = register_filesystem(&shmem_fs_type);
4627	if (error) {
4628		pr_err("Could not register tmpfs\n");
4629		goto out2;
4630	}
4631
4632	shm_mnt = kern_mount(&shmem_fs_type);
4633	if (IS_ERR(shm_mnt)) {
4634		error = PTR_ERR(shm_mnt);
4635		pr_err("Could not kern_mount tmpfs\n");
4636		goto out1;
4637	}
4638
4639#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4640	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4641		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4642	else
4643		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4644#endif
4645	return;
4646
4647out1:
4648	unregister_filesystem(&shmem_fs_type);
4649out2:
4650#ifdef CONFIG_TMPFS_QUOTA
4651	unregister_quota_format(&shmem_quota_format);
4652out3:
4653#endif
4654	shmem_destroy_inodecache();
4655	shm_mnt = ERR_PTR(error);
4656}
4657
4658#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4659static ssize_t shmem_enabled_show(struct kobject *kobj,
4660				  struct kobj_attribute *attr, char *buf)
4661{
4662	static const int values[] = {
4663		SHMEM_HUGE_ALWAYS,
4664		SHMEM_HUGE_WITHIN_SIZE,
4665		SHMEM_HUGE_ADVISE,
4666		SHMEM_HUGE_NEVER,
4667		SHMEM_HUGE_DENY,
4668		SHMEM_HUGE_FORCE,
4669	};
4670	int len = 0;
4671	int i;
4672
4673	for (i = 0; i < ARRAY_SIZE(values); i++) {
4674		len += sysfs_emit_at(buf, len,
4675				shmem_huge == values[i] ? "%s[%s]" : "%s%s",
4676				i ? " " : "", shmem_format_huge(values[i]));
4677	}
4678	len += sysfs_emit_at(buf, len, "\n");
4679
4680	return len;
4681}
4682
4683static ssize_t shmem_enabled_store(struct kobject *kobj,
4684		struct kobj_attribute *attr, const char *buf, size_t count)
4685{
4686	char tmp[16];
4687	int huge;
4688
4689	if (count + 1 > sizeof(tmp))
4690		return -EINVAL;
4691	memcpy(tmp, buf, count);
4692	tmp[count] = '\0';
4693	if (count && tmp[count - 1] == '\n')
4694		tmp[count - 1] = '\0';
4695
4696	huge = shmem_parse_huge(tmp);
4697	if (huge == -EINVAL)
4698		return -EINVAL;
4699	if (!has_transparent_hugepage() &&
4700			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4701		return -EINVAL;
4702
4703	shmem_huge = huge;
4704	if (shmem_huge > SHMEM_HUGE_DENY)
4705		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4706	return count;
4707}
4708
4709struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
4710#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
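
/*
 * Illustrative sysfs usage of the knob defined above:
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	always within_size advise [never] deny force
 *	# echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */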
4711
4712#else /* !CONFIG_SHMEM */
4713
4714/*
4715 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4716 *
4717 * This is intended for small systems where the benefits of the full
4718 * shmem code (swap-backed and resource-limited) are outweighed by
4719 * their complexity. On systems without swap this code should be
4720 * effectively equivalent, but much lighter weight.
4721 */
4722
4723static struct file_system_type shmem_fs_type = {
4724	.name		= "tmpfs",
4725	.init_fs_context = ramfs_init_fs_context,
4726	.parameters	= ramfs_fs_parameters,
4727	.kill_sb	= ramfs_kill_sb,
4728	.fs_flags	= FS_USERNS_MOUNT,
4729};
4730
4731void __init shmem_init(void)
4732{
4733	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4734
4735	shm_mnt = kern_mount(&shmem_fs_type);
4736	BUG_ON(IS_ERR(shm_mnt));
4737}
4738
4739int shmem_unuse(unsigned int type)
4740{
4741	return 0;
4742}
4743
4744int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4745{
4746	return 0;
4747}
4748
4749void shmem_unlock_mapping(struct address_space *mapping)
4750{
4751}
4752
4753#ifdef CONFIG_MMU
4754unsigned long shmem_get_unmapped_area(struct file *file,
4755				      unsigned long addr, unsigned long len,
4756				      unsigned long pgoff, unsigned long flags)
4757{
4758	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4759}
4760#endif
4761
4762void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4763{
4764	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4765}
4766EXPORT_SYMBOL_GPL(shmem_truncate_range);
4767
4768#define shmem_vm_ops				generic_file_vm_ops
4769#define shmem_anon_vm_ops			generic_file_vm_ops
4770#define shmem_file_operations			ramfs_file_operations
4771#define shmem_acct_size(flags, size)		0
4772#define shmem_unacct_size(flags, size)		do {} while (0)
4773
4774static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
4775				struct super_block *sb, struct inode *dir,
4776				umode_t mode, dev_t dev, unsigned long flags)
4777{
4778	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
4779	return inode ? inode : ERR_PTR(-ENOSPC);
4780}
4781
4782#endif /* CONFIG_SHMEM */
4783
4784/* common code */
4785
4786static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
4787			loff_t size, unsigned long flags, unsigned int i_flags)
4788{
4789	struct inode *inode;
4790	struct file *res;
4791
4792	if (IS_ERR(mnt))
4793		return ERR_CAST(mnt);
4794
4795	if (size < 0 || size > MAX_LFS_FILESIZE)
4796		return ERR_PTR(-EINVAL);
4797
4798	if (shmem_acct_size(flags, size))
4799		return ERR_PTR(-ENOMEM);
4800
4801	if (is_idmapped_mnt(mnt))
4802		return ERR_PTR(-EINVAL);
4803
4804	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
4805				S_IFREG | S_IRWXUGO, 0, flags);
4806	if (IS_ERR(inode)) {
4807		shmem_unacct_size(flags, size);
4808		return ERR_CAST(inode);
4809	}
4810	inode->i_flags |= i_flags;
4811	inode->i_size = size;
4812	clear_nlink(inode);	/* It is unlinked */
4813	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4814	if (!IS_ERR(res))
4815		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4816				&shmem_file_operations);
4817	if (IS_ERR(res))
4818		iput(inode);
4819	return res;
4820}
4821
4822/**
4823 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4824 * 	kernel internal.  There will be NO LSM permission checks against the
4825 * 	underlying inode.  So users of this interface must do LSM checks at a
4826 *	higher layer.  The users are the big_key and shm implementations.  LSM
4827 *	checks are provided at the key or shm level rather than the inode.
4828 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4829 * @size: size to be set for the file
4830 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4831 */
4832struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4833{
4834	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4835}
4836
4837/**
4838 * shmem_file_setup - get an unlinked file living in tmpfs
4839 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4840 * @size: size to be set for the file
4841 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4842 */
4843struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4844{
4845	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4846}
4847EXPORT_SYMBOL_GPL(shmem_file_setup);
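
/*
 * Minimal in-kernel usage sketch of the helper above; the name and size
 * are arbitrary, and error handling is reduced to the essentials:
 *
 *	struct file *filp = shmem_file_setup("my-buffer", SZ_1M, VM_NORESERVE);
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 */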
4848
4849/**
4850 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4851 * @mnt: the tmpfs mount where the file will be created
4852 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4853 * @size: size to be set for the file
4854 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4855 */
4856struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4857				       loff_t size, unsigned long flags)
4858{
4859	return __shmem_file_setup(mnt, name, size, flags, 0);
4860}
4861EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4862
4863/**
4864 * shmem_zero_setup - setup a shared anonymous mapping
4865 * @vma: the vma to be mmapped is prepared by do_mmap
4866 */
4867int shmem_zero_setup(struct vm_area_struct *vma)
4868{
4869	struct file *file;
4870	loff_t size = vma->vm_end - vma->vm_start;
4871
4872	/*
4873	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4874	 * between XFS directory reading and selinux: since this file is only
4875	 * accessible to the user through its mapping, use S_PRIVATE flag to
4876	 * bypass file security, in the same way as shmem_kernel_file_setup().
4877	 */
4878	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4879	if (IS_ERR(file))
4880		return PTR_ERR(file);
4881
4882	if (vma->vm_file)
4883		fput(vma->vm_file);
4884	vma->vm_file = file;
4885	vma->vm_ops = &shmem_anon_vm_ops;
4886
4887	return 0;
4888}
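
/*
 * Userspace trigger for the path above (illustrative): a shared
 * anonymous mapping ends up backed by one of these tmpfs files.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 */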
4889
4890/**
4891 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4892 * @mapping:	the folio's address_space
4893 * @index:	the folio index
4894 * @gfp:	the page allocator flags to use if allocating
4895 *
4896 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4897 * with any new page allocations done using the specified allocation flags.
4898 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
4899 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4900 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4901 *
4902 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4903 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4904 */
4905struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4906		pgoff_t index, gfp_t gfp)
4907{
4908#ifdef CONFIG_SHMEM
4909	struct inode *inode = mapping->host;
4910	struct folio *folio;
4911	int error;
4912
4913	BUG_ON(!shmem_mapping(mapping));
4914	error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4915				    gfp, NULL, NULL);
4916	if (error)
4917		return ERR_PTR(error);
4918
4919	folio_unlock(folio);
4920	return folio;
4921#else
4922	/*
4923	 * The tiny !SHMEM case uses ramfs without swap
4924	 */
4925	return mapping_read_folio_gfp(mapping, index, gfp);
4926#endif
4927}
4928EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
4929
4930struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4931					 pgoff_t index, gfp_t gfp)
4932{
4933	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4934	struct page *page;
4935
4936	if (IS_ERR(folio))
4937		return &folio->page;
4938
4939	page = folio_file_page(folio, index);
4940	if (PageHWPoison(page)) {
4941		folio_put(folio);
4942		return ERR_PTR(-EIO);
4943	}
4944
4945	return page;
4946}
4947EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
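
/*
 * Illustrative driver-side use of the helper above, in the spirit of
 * the i915 example mentioned earlier ("mapping" and "n" are assumed):
 *
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, n,
 *			mapping_gfp_mask(mapping) |
 *			__GFP_NORETRY | __GFP_NOWARN);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */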