   1/*
   2 *  linux/mm/swapfile.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *  Swap reorganised 29.12.95, Stephen Tweedie
   6 */
   7
 
   8#include <linux/mm.h>
 
 
   9#include <linux/hugetlb.h>
  10#include <linux/mman.h>
  11#include <linux/slab.h>
  12#include <linux/kernel_stat.h>
  13#include <linux/swap.h>
  14#include <linux/vmalloc.h>
  15#include <linux/pagemap.h>
  16#include <linux/namei.h>
  17#include <linux/shmem_fs.h>
  18#include <linux/blkdev.h>
  19#include <linux/random.h>
  20#include <linux/writeback.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/init.h>
  24#include <linux/module.h>
  25#include <linux/ksm.h>
  26#include <linux/rmap.h>
  27#include <linux/security.h>
  28#include <linux/backing-dev.h>
  29#include <linux/mutex.h>
  30#include <linux/capability.h>
  31#include <linux/syscalls.h>
  32#include <linux/memcontrol.h>
  33#include <linux/poll.h>
  34#include <linux/oom.h>
  35
  36#include <asm/pgtable.h>
  37#include <asm/tlbflush.h>
  38#include <linux/swapops.h>
  39#include <linux/page_cgroup.h>
 
 
  40
  41static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  42				 unsigned char);
  43static void free_swap_count_continuations(struct swap_info_struct *);
  44static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  45
  46static DEFINE_SPINLOCK(swap_lock);
  47static unsigned int nr_swapfiles;
  48long nr_swap_pages;
  49long total_swap_pages;
  50static int least_priority;
  51
  52static const char Bad_file[] = "Bad swap file entry ";
  53static const char Unused_file[] = "Unused swap file entry ";
  54static const char Bad_offset[] = "Bad swap offset entry ";
  55static const char Unused_offset[] = "Unused swap offset entry ";
  56
  57static struct swap_list_t swap_list = {-1, -1};
  58
  59static struct swap_info_struct *swap_info[MAX_SWAPFILES];
  60
  61static DEFINE_MUTEX(swapon_mutex);
  62
  63static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  64/* Activity counter to indicate that a swapon or swapoff has occurred */
  65static atomic_t proc_poll_event = ATOMIC_INIT(0);
  66
  67static inline unsigned char swap_count(unsigned char ent)
  68{
  69	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
  70}
  71
  72/* returns 1 if swap entry is freed */
  73static int
  74__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
  75{
  76	swp_entry_t entry = swp_entry(si->type, offset);
  77	struct page *page;
  78	int ret = 0;
  79
  80	page = find_get_page(&swapper_space, entry.val);
  81	if (!page)
  82		return 0;
  83	/*
  84	 * This function is called from scan_swap_map(), which is reached
  85	 * from vmscan.c while reclaiming pages, so a page lock may already be
  86	 * held here.  We have to use trylock to avoid deadlock.  This is a
  87	 * special case; in normal operation use try_to_free_swap() with an
  88	 * explicit lock_page().
  89	 */
  90	if (trylock_page(page)) {
  91		ret = try_to_free_swap(page);
  92		unlock_page(page);
  93	}
  94	page_cache_release(page);
  95	return ret;
  96}
  97
  98/*
  99 * swapon tells the device that all the old swap contents can be discarded,
 100 * to allow the swap device to optimize its wear-levelling.
 101 */
 102static int discard_swap(struct swap_info_struct *si)
 103{
 104	struct swap_extent *se;
 105	sector_t start_block;
 106	sector_t nr_blocks;
 107	int err = 0;
 108
 109	/* Do not discard the swap header page! */
 110	se = &si->first_swap_extent;
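     	/* (PAGE_SHIFT - 9) converts page numbers to the 512-byte sectors used by blkdev_issue_discard() */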
 111	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 112	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 113	if (nr_blocks) {
 114		err = blkdev_issue_discard(si->bdev, start_block,
 115				nr_blocks, GFP_KERNEL, 0);
 116		if (err)
 117			return err;
 118		cond_resched();
 119	}
 120
 121	list_for_each_entry(se, &si->first_swap_extent.list, list) {
 122		start_block = se->start_block << (PAGE_SHIFT - 9);
 123		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 124
 125		err = blkdev_issue_discard(si->bdev, start_block,
 126				nr_blocks, GFP_KERNEL, 0);
 127		if (err)
 128			break;
 129
 130		cond_resched();
 131	}
 132	return err;		/* That will often be -EOPNOTSUPP */
 133}
 134
 135/*
 136 * swap allocation tells the device that a cluster of swap can now be discarded,
 137 * to allow the swap device to optimize its wear-levelling.
 138 */
 139static void discard_swap_cluster(struct swap_info_struct *si,
 140				 pgoff_t start_page, pgoff_t nr_pages)
 141{
 142	struct swap_extent *se = si->curr_swap_extent;
 143	int found_extent = 0;
 144
 145	while (nr_pages) {
 146		struct list_head *lh;
 147
 148		if (se->start_page <= start_page &&
 149		    start_page < se->start_page + se->nr_pages) {
 150			pgoff_t offset = start_page - se->start_page;
 151			sector_t start_block = se->start_block + offset;
 152			sector_t nr_blocks = se->nr_pages - offset;
 153
 154			if (nr_blocks > nr_pages)
 155				nr_blocks = nr_pages;
 156			start_page += nr_blocks;
 157			nr_pages -= nr_blocks;
 158
 159			if (!found_extent++)
 160				si->curr_swap_extent = se;
 161
 162			start_block <<= PAGE_SHIFT - 9;
 163			nr_blocks <<= PAGE_SHIFT - 9;
 164			if (blkdev_issue_discard(si->bdev, start_block,
 165				    nr_blocks, GFP_NOIO, 0))
 166				break;
 
 167		}
 168
 169		lh = se->list.next;
 170		se = list_entry(lh, struct swap_extent, list);
 171	}
 
 172}
 173
 174static int wait_for_discard(void *word)
 175{
 176	schedule();
 177	return 0;
 178}
 179
 180#define SWAPFILE_CLUSTER	256
 181#define LATENCY_LIMIT		256
 182
 183static unsigned long scan_swap_map(struct swap_info_struct *si,
 184				   unsigned char usage)
 185{
 
 186	unsigned long offset;
 187	unsigned long scan_base;
 188	unsigned long last_in_cluster = 0;
 189	int latency_ration = LATENCY_LIMIT;
 190	int found_free_cluster = 0;
 
 191
 192	/*
 193	 * We try to cluster swap pages by allocating them sequentially
 194	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 195	 * way, however, we resort to first-free allocation, starting
 196	 * a new cluster.  This prevents us from scattering swap pages
 197	 * all over the entire swap partition, so that we reduce
 198	 * overall disk seek times between swap pages.  -- sct
 199	 * But we do now try to find an empty cluster.  -Andrea
 200	 * And we let swap pages go all over an SSD partition.  Hugh
 201	 */
 202
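     	/*
     	 * SWP_SCANNING sits above all the other SWP_* flags, so adding and
     	 * subtracting it lets ->flags double as a count of tasks currently
     	 * inside scan_swap_map(); swapoff waits for flags < SWP_SCANNING
     	 * further below before tearing the area down.
     	 */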
 203	si->flags += SWP_SCANNING;
 204	scan_base = offset = si->cluster_next;
 205
 206	if (unlikely(!si->cluster_nr--)) {
 207		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 208			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 209			goto checks;
 210		}
 211		if (si->flags & SWP_DISCARDABLE) {
 212			/*
 213			 * Start range check on racing allocations, in case
 214			 * they overlap the cluster we eventually decide on
 215			 * (we scan without swap_lock to allow preemption).
 216			 * It's hardly conceivable that cluster_nr could be
 217			 * wrapped during our scan, but don't depend on it.
 218			 */
 219			if (si->lowest_alloc)
 220				goto checks;
 221			si->lowest_alloc = si->max;
 222			si->highest_alloc = 0;
 223		}
 224		spin_unlock(&swap_lock);
 225
 226		/*
 227		 * If seek is expensive, start searching for new cluster from
 228		 * start of partition, to minimize the span of allocated swap.
 229		 * But if seek is cheap, search from our current position, so
 230		 * that swap is allocated from all over the partition: if the
 231		 * Flash Translation Layer only remaps within limited zones,
 232		 * we don't want to wear out the first zone too quickly.
 233		 */
 234		if (!(si->flags & SWP_SOLIDSTATE))
 235			scan_base = offset = si->lowest_bit;
 236		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 237
 238		/* Locate the first empty (unaligned) cluster */
 239		for (; last_in_cluster <= si->highest_bit; offset++) {
 240			if (si->swap_map[offset])
 241				last_in_cluster = offset + SWAPFILE_CLUSTER;
 242			else if (offset == last_in_cluster) {
 243				spin_lock(&swap_lock);
 244				offset -= SWAPFILE_CLUSTER - 1;
 245				si->cluster_next = offset;
 246				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 247				found_free_cluster = 1;
 248				goto checks;
 249			}
 250			if (unlikely(--latency_ration < 0)) {
 251				cond_resched();
 252				latency_ration = LATENCY_LIMIT;
 253			}
 254		}
 255
 256		offset = si->lowest_bit;
 257		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 258
 259		/* Locate the first empty (unaligned) cluster */
 260		for (; last_in_cluster < scan_base; offset++) {
 261			if (si->swap_map[offset])
 262				last_in_cluster = offset + SWAPFILE_CLUSTER;
 263			else if (offset == last_in_cluster) {
 264				spin_lock(&swap_lock);
 265				offset -= SWAPFILE_CLUSTER - 1;
 266				si->cluster_next = offset;
 267				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 268				found_free_cluster = 1;
 269				goto checks;
 270			}
 271			if (unlikely(--latency_ration < 0)) {
 272				cond_resched();
 273				latency_ration = LATENCY_LIMIT;
 274			}
 275		}
 276
 277		offset = scan_base;
 278		spin_lock(&swap_lock);
 279		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 280		si->lowest_alloc = 0;
 281	}
 282
 283checks:
 284	if (!(si->flags & SWP_WRITEOK))
 285		goto no_page;
 286	if (!si->highest_bit)
 287		goto no_page;
 288	if (offset > si->highest_bit)
 289		scan_base = offset = si->lowest_bit;
 290
 
 291	/* reuse swap entry of cache-only swap if not busy. */
 292	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 293		int swap_was_freed;
 294		spin_unlock(&swap_lock);
 295		swap_was_freed = __try_to_reclaim_swap(si, offset);
 296		spin_lock(&swap_lock);
 
 297		/* entry was freed successfully, try to use this again */
 298		if (swap_was_freed)
 299			goto checks;
 300		goto scan; /* check next one */
 301	}
 302
 303	if (si->swap_map[offset])
 304		goto scan;
 305
 306	if (offset == si->lowest_bit)
 307		si->lowest_bit++;
 308	if (offset == si->highest_bit)
 309		si->highest_bit--;
 310	si->inuse_pages++;
 311	if (si->inuse_pages == si->pages) {
 312		si->lowest_bit = si->max;
 313		si->highest_bit = 0;
 314	}
 315	si->swap_map[offset] = usage;
 316	si->cluster_next = offset + 1;
 317	si->flags -= SWP_SCANNING;
 318
 319	if (si->lowest_alloc) {
 320		/*
 321		 * Only set when SWP_DISCARDABLE, and there's a scan
 322		 * for a free cluster in progress or just completed.
 323		 */
 324		if (found_free_cluster) {
 325			/*
 326			 * To optimize wear-levelling, discard the
 327			 * old data of the cluster, taking care not to
 328			 * discard any of its pages that have already
 329			 * been allocated by racing tasks (offset has
 330			 * already stepped over any at the beginning).
 331			 */
 332			if (offset < si->highest_alloc &&
 333			    si->lowest_alloc <= last_in_cluster)
 334				last_in_cluster = si->lowest_alloc - 1;
 335			si->flags |= SWP_DISCARDING;
 336			spin_unlock(&swap_lock);
 337
 338			if (offset < last_in_cluster)
 339				discard_swap_cluster(si, offset,
 340					last_in_cluster - offset + 1);
 341
 342			spin_lock(&swap_lock);
 343			si->lowest_alloc = 0;
 344			si->flags &= ~SWP_DISCARDING;
 345
 346			smp_mb();	/* wake_up_bit advises this */
 347			wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
 348
 349		} else if (si->flags & SWP_DISCARDING) {
 350			/*
 351			 * Delay using pages allocated by racing tasks
 352			 * until the whole discard has been issued. We
 353			 * could defer that delay until swap_writepage,
 354			 * but it's easier to keep this self-contained.
 355			 */
 356			spin_unlock(&swap_lock);
 357			wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
 358				wait_for_discard, TASK_UNINTERRUPTIBLE);
 359			spin_lock(&swap_lock);
 360		} else {
 361			/*
 362			 * Note pages allocated by racing tasks while
 363			 * scan for a free cluster is in progress, so
 364			 * that its final discard can exclude them.
 365			 */
 366			if (offset < si->lowest_alloc)
 367				si->lowest_alloc = offset;
 368			if (offset > si->highest_alloc)
 369				si->highest_alloc = offset;
 370		}
 371	}
 372	return offset;
 373
 374scan:
 375	spin_unlock(&swap_lock);
 376	while (++offset <= si->highest_bit) {
 377		if (!si->swap_map[offset]) {
 378			spin_lock(&swap_lock);
 379			goto checks;
 380		}
 381		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 382			spin_lock(&swap_lock);
 383			goto checks;
 384		}
 385		if (unlikely(--latency_ration < 0)) {
 386			cond_resched();
 387			latency_ration = LATENCY_LIMIT;
 
 388		}
 
 
 389	}
 390	offset = si->lowest_bit;
 391	while (++offset < scan_base) {
 392		if (!si->swap_map[offset]) {
 393			spin_lock(&swap_lock);
 394			goto checks;
 395		}
 396		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 397			spin_lock(&swap_lock);
 398			goto checks;
 399		}
 400		if (unlikely(--latency_ration < 0)) {
 401			cond_resched();
 402			latency_ration = LATENCY_LIMIT;
 
 403		}
 404	}
 405	spin_lock(&swap_lock);
 406
 407no_page:
 408	si->flags -= SWP_SCANNING;
 409	return 0;
 410}
 411
 412swp_entry_t get_swap_page(void)
 413{
 414	struct swap_info_struct *si;
 415	pgoff_t offset;
 416	int type, next;
 417	int wrapped = 0;
 418
 419	spin_lock(&swap_lock);
 420	if (nr_swap_pages <= 0)
 421		goto noswap;
 422	nr_swap_pages--;
 423
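     	/*
     	 * Try swap areas in priority order, starting from swap_list.next so
     	 * that areas of equal priority are used round-robin; wrap back to
     	 * the head of the list at most twice.
     	 */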
 424	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
 425		si = swap_info[type];
 426		next = si->next;
 427		if (next < 0 ||
 428		    (!wrapped && si->prio != swap_info[next]->prio)) {
 429			next = swap_list.head;
 430			wrapped++;
 431		}
 432
 433		if (!si->highest_bit)
 434			continue;
 435		if (!(si->flags & SWP_WRITEOK))
 436			continue;
 437
 438		swap_list.next = next;
 439		/* This is called to allocate a swap entry for the swap cache */
 440		offset = scan_swap_map(si, SWAP_HAS_CACHE);
 441		if (offset) {
 442			spin_unlock(&swap_lock);
 443			return swp_entry(type, offset);
 444		}
 445		next = swap_list.next;
 446	}
 447
 448	nr_swap_pages++;
 449noswap:
 450	spin_unlock(&swap_lock);
 451	return (swp_entry_t) {0};
 452}
 453
 454/* The only caller of this function is now the suspend routine */
 455swp_entry_t get_swap_page_of_type(int type)
 456{
 457	struct swap_info_struct *si;
 458	pgoff_t offset;
 459
 460	spin_lock(&swap_lock);
 461	si = swap_info[type];
 462	if (si && (si->flags & SWP_WRITEOK)) {
 463		nr_swap_pages--;
 464		/* This is called to allocate a swap entry, not for the swap cache */
 465		offset = scan_swap_map(si, 1);
 466		if (offset) {
 467			spin_unlock(&swap_lock);
 468			return swp_entry(type, offset);
 469		}
 470		nr_swap_pages++;
 471	}
 472	spin_unlock(&swap_lock);
 473	return (swp_entry_t) {0};
 474}
 475
 476static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 477{
 478	struct swap_info_struct *p;
 479	unsigned long offset, type;
 480
 481	if (!entry.val)
 482		goto out;
 483	type = swp_type(entry);
 484	if (type >= nr_swapfiles)
 485		goto bad_nofile;
 486	p = swap_info[type];
 487	if (!(p->flags & SWP_USED))
 488		goto bad_device;
 489	offset = swp_offset(entry);
 490	if (offset >= p->max)
 491		goto bad_offset;
 492	if (!p->swap_map[offset])
 493		goto bad_free;
 494	spin_lock(&swap_lock);
 495	return p;
 496
 497bad_free:
 498	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
 499	goto out;
 500bad_offset:
 501	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
 502	goto out;
 503bad_device:
 504	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
 505	goto out;
 506bad_nofile:
 507	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
 508out:
 509	return NULL;
 510}
 511
 512static unsigned char swap_entry_free(struct swap_info_struct *p,
 513				     swp_entry_t entry, unsigned char usage)
 514{
 515	unsigned long offset = swp_offset(entry);
 516	unsigned char count;
 517	unsigned char has_cache;
 518
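     	/*
     	 * A swap_map byte packs the usage count together with SWAP_HAS_CACHE
     	 * (see swap_count() above); COUNT_CONTINUED marks counts that have
     	 * overflowed into continuation pages, handled by
     	 * swap_count_continued().
     	 */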
 519	count = p->swap_map[offset];
 
 520	has_cache = count & SWAP_HAS_CACHE;
 521	count &= ~SWAP_HAS_CACHE;
 522
 523	if (usage == SWAP_HAS_CACHE) {
 524		VM_BUG_ON(!has_cache);
 525		has_cache = 0;
 526	} else if (count == SWAP_MAP_SHMEM) {
 527		/*
 528		 * Or we could insist on shmem.c using a special
 529		 * swap_shmem_free() and free_shmem_swap_and_cache()...
 530		 */
 531		count = 0;
 532	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 533		if (count == COUNT_CONTINUED) {
 534			if (swap_count_continued(p, offset, count))
 535				count = SWAP_MAP_MAX | COUNT_CONTINUED;
 536			else
 537				count = SWAP_MAP_MAX;
 538		} else
 539			count--;
 540	}
 541
 542	if (!count)
 543		mem_cgroup_uncharge_swap(entry);
 544
 545	usage = count | has_cache;
 546	p->swap_map[offset] = usage;
 547
 548	/* free if no reference */
 549	if (!usage) {
 550		struct gendisk *disk = p->bdev->bd_disk;
 551		if (offset < p->lowest_bit)
 552			p->lowest_bit = offset;
 553		if (offset > p->highest_bit)
 554			p->highest_bit = offset;
 555		if (swap_list.next >= 0 &&
 556		    p->prio > swap_info[swap_list.next]->prio)
 557			swap_list.next = p->type;
 558		nr_swap_pages++;
 559		p->inuse_pages--;
 560		if ((p->flags & SWP_BLKDEV) &&
 561				disk->fops->swap_slot_free_notify)
 562			disk->fops->swap_slot_free_notify(p->bdev, offset);
 563	}
 564
 565	return usage;
 566}
 567
 568/*
 569 * Caller has made sure that the swapdevice corresponding to entry
 570 * is still around or has not been recycled.
 571 */
 572void swap_free(swp_entry_t entry)
 573{
 574	struct swap_info_struct *p;
 575
 576	p = swap_info_get(entry);
 577	if (p) {
 578		swap_entry_free(p, entry, 1);
 579		spin_unlock(&swap_lock);
 580	}
 581}
 582
 583/*
 584 * Called after dropping swapcache to decrease the refcount of swap entries.
 585 */
 586void swapcache_free(swp_entry_t entry, struct page *page)
 587{
 588	struct swap_info_struct *p;
 589	unsigned char count;
 590
 591	p = swap_info_get(entry);
 592	if (p) {
 593		count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
 594		if (page)
 595			mem_cgroup_uncharge_swapcache(page, entry, count != 0);
 596		spin_unlock(&swap_lock);
 597	}
 598}
 599
 600/*
 601 * How many references to page are currently swapped out?
 602 * This does not give an exact answer when swap count is continued,
 603 * but does include the high COUNT_CONTINUED flag to allow for that.
 604 */
 605static inline int page_swapcount(struct page *page)
 606{
 607	int count = 0;
 608	struct swap_info_struct *p;
 609	swp_entry_t entry;
 610
 611	entry.val = page_private(page);
 612	p = swap_info_get(entry);
 613	if (p) {
 614		count = swap_count(p->swap_map[swp_offset(entry)]);
 615		spin_unlock(&swap_lock);
 616	}
 617	return count;
 618}
 619
 620/*
 621 * We can write to an anon page without COW if there are no other references
 622 * to it.  And as a side-effect, free up its swap: because the old content
 623 * on disk will never be read, and seeking back there to write new content
 624 * later would only waste time away from clustering.
 625 */
 626int reuse_swap_page(struct page *page)
 627{
 628	int count;
 629
 630	VM_BUG_ON(!PageLocked(page));
 631	if (unlikely(PageKsm(page)))
 632		return 0;
 633	count = page_mapcount(page);
 634	if (count <= 1 && PageSwapCache(page)) {
 635		count += page_swapcount(page);
 636		if (count == 1 && !PageWriteback(page)) {
 637			delete_from_swap_cache(page);
 638			SetPageDirty(page);
 639		}
 640	}
 641	return count <= 1;
 
 
 642}
 643
 644/*
 645 * If swap is getting full, or if there are no more mappings of this page,
 646 * then try_to_free_swap is called to free its swap space.
 647 */
 648int try_to_free_swap(struct page *page)
 649{
 650	VM_BUG_ON(!PageLocked(page));
 651
 652	if (!PageSwapCache(page))
 653		return 0;
 654	if (PageWriteback(page))
 655		return 0;
 656	if (page_swapcount(page))
 657		return 0;
 658
 659	/*
 660	 * Once hibernation has begun to create its image of memory,
 661	 * there's a danger that one of the calls to try_to_free_swap()
 662	 * - most probably a call from __try_to_reclaim_swap() while
 663	 * hibernation is allocating its own swap pages for the image,
 664	 * but conceivably even a call from memory reclaim - will free
 665	 * the swap from a page which has already been recorded in the
 666	 * image as a clean swapcache page, and then reuse its swap for
 667	 * another page of the image.  On waking from hibernation, the
 668	 * original page might be freed under memory pressure, then
 669	 * later read back in from swap, now with the wrong data.
 670	 *
 671	 * Hibernation clears bits from gfp_allowed_mask to prevent
 672	 * memory reclaim from writing to disk, so check that here.
 673	 */
 674	if (!(gfp_allowed_mask & __GFP_IO))
 675		return 0;
 676
 677	delete_from_swap_cache(page);
 678	SetPageDirty(page);
 679	return 1;
 680}
 681
 682/*
 683 * Free the swap entry like above, but also try to
 684 * free the page cache entry if it is the last user.
 685 */
 686int free_swap_and_cache(swp_entry_t entry)
 687{
 688	struct swap_info_struct *p;
 689	struct page *page = NULL;
 690
 691	if (non_swap_entry(entry))
 692		return 1;
 693
 694	p = swap_info_get(entry);
 695	if (p) {
 696		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
 697			page = find_get_page(&swapper_space, entry.val);
 698			if (page && !trylock_page(page)) {
 699				page_cache_release(page);
 700				page = NULL;
 701			}
 702		}
 703		spin_unlock(&swap_lock);
 704	}
 705	if (page) {
 706		/*
 707		 * Not mapped elsewhere, or swap space full? Free it!
 708		 * Also recheck PageSwapCache now page is locked (above).
 709		 */
 710		if (PageSwapCache(page) && !PageWriteback(page) &&
 711				(!page_mapped(page) || vm_swap_full())) {
 712			delete_from_swap_cache(page);
 713			SetPageDirty(page);
 714		}
 715		unlock_page(page);
 716		page_cache_release(page);
 717	}
 718	return p != NULL;
 719}
 720
 721#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 722/**
 723 * mem_cgroup_count_swap_user - count the user of a swap entry
 724 * @ent: the swap entry to be checked
 725 * @pagep: the pointer for the swap cache page of the entry to be stored
 726 *
 727 * Returns the number of users of the swap entry. The number is valid only
 728 * for swaps of anonymous pages.
 729 * If the entry is found in the swap cache, the page is stored in *pagep
 730 * with its refcount incremented.
 731 */
 732int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 733{
 734	struct page *page;
 735	struct swap_info_struct *p;
 736	int count = 0;
 737
 738	page = find_get_page(&swapper_space, ent.val);
 739	if (page)
 740		count += page_mapcount(page);
 741	p = swap_info_get(ent);
 742	if (p) {
 743		count += swap_count(p->swap_map[swp_offset(ent)]);
 744		spin_unlock(&swap_lock);
 745	}
 746
 747	*pagep = page;
 748	return count;
 749}
 750#endif
 751
 752#ifdef CONFIG_HIBERNATION
 753/*
 754 * Find the swap type that corresponds to given device (if any).
 755 *
 756 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 757 * from 0, in which the swap header is expected to be located.
 758 *
 759 * This is needed for the suspend to disk (aka swsusp).
 760 */
 761int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 762{
 763	struct block_device *bdev = NULL;
 764	int type;
 765
 766	if (device)
 767		bdev = bdget(device);
 768
 769	spin_lock(&swap_lock);
 770	for (type = 0; type < nr_swapfiles; type++) {
 771		struct swap_info_struct *sis = swap_info[type];
 772
 773		if (!(sis->flags & SWP_WRITEOK))
 774			continue;
 775
 776		if (!bdev) {
 777			if (bdev_p)
 778				*bdev_p = bdgrab(sis->bdev);
 779
 780			spin_unlock(&swap_lock);
 781			return type;
 782		}
 783		if (bdev == sis->bdev) {
 784			struct swap_extent *se = &sis->first_swap_extent;
 785
 786			if (se->start_block == offset) {
 787				if (bdev_p)
 788					*bdev_p = bdgrab(sis->bdev);
 789
 790				spin_unlock(&swap_lock);
 791				bdput(bdev);
 792				return type;
 793			}
 794		}
 795	}
 796	spin_unlock(&swap_lock);
 797	if (bdev)
 798		bdput(bdev);
 799
 800	return -ENODEV;
 801}
 802
 803/*
 804 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 805 * corresponding to given index in swap_info (swap type).
 806 */
 807sector_t swapdev_block(int type, pgoff_t offset)
 808{
 809	struct block_device *bdev;
 
 810
 811	if ((unsigned int)type >= nr_swapfiles)
 812		return 0;
 813	if (!(swap_info[type]->flags & SWP_WRITEOK))
 814		return 0;
 815	return map_swap_entry(swp_entry(type, offset), &bdev);
 
 816}
 817
 818/*
 819 * Return either the total number of swap pages of given type, or the number
 820 * of free pages of that type (depending on @free)
 821 *
 822 * This is needed for software suspend
 823 */
 824unsigned int count_swap_pages(int type, int free)
 825{
 826	unsigned int n = 0;
 827
 828	spin_lock(&swap_lock);
 829	if ((unsigned int)type < nr_swapfiles) {
 830		struct swap_info_struct *sis = swap_info[type];
 831
 
 832		if (sis->flags & SWP_WRITEOK) {
 833			n = sis->pages;
 834			if (free)
 835				n -= sis->inuse_pages;
 836		}
 
 837	}
 838	spin_unlock(&swap_lock);
 839	return n;
 840}
 841#endif /* CONFIG_HIBERNATION */
 842
 843/*
 844 * No need to decide whether this PTE shares the swap entry with others,
 845 * just let do_wp_page work it out if a write is requested later - to
 846 * force COW, vm_page_prot omits write permission from any private vma.
 847 */
 848static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 849		unsigned long addr, swp_entry_t entry, struct page *page)
 850{
 851	struct mem_cgroup *ptr;
 
 852	spinlock_t *ptl;
 853	pte_t *pte;
 
 854	int ret = 1;
 855
 856	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
 857		ret = -ENOMEM;
 858		goto out_nolock;
 859	}
 860
 861	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 862	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
 863		if (ret > 0)
 864			mem_cgroup_cancel_charge_swapin(ptr);
 865		ret = 0;
 866		goto out;
 867	}
 868
 869	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 870	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 871	get_page(page);
 872	set_pte_at(vma->vm_mm, addr, pte,
 873		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
 874	page_add_anon_rmap(page, vma, addr);
 875	mem_cgroup_commit_charge_swapin(page, ptr);
 876	swap_free(entry);
 877	/*
 878	 * Move the page to the active list so it is not
 879	 * immediately swapped out again after swapon.
 880	 */
 881	activate_page(page);
 882out:
 883	pte_unmap_unlock(pte, ptl);
 884out_nolock:
 885	return ret;
 886}
 887
 888static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 889				unsigned long addr, unsigned long end,
 890				swp_entry_t entry, struct page *page)
 891{
 892	pte_t swp_pte = swp_entry_to_pte(entry);
 893	pte_t *pte;
 894	int ret = 0;
 895
 896	/*
 897	 * We don't actually need pte lock while scanning for swp_pte: since
 898	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
 899	 * page table while we're scanning; though it could get zapped, and on
 900	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
 901	 * of unmatched parts which look like swp_pte, so unuse_pte must
 902	 * recheck under pte lock.  Scanning without pte lock lets it be
 903	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
 904	 */
 905	pte = pte_offset_map(pmd, addr);
 906	do {
 907		/*
 908		 * swapoff spends a _lot_ of time in this loop!
 909		 * Test inline before going to call unuse_pte.
 910		 */
 911		if (unlikely(pte_same(*pte, swp_pte))) {
 912			pte_unmap(pte);
 913			ret = unuse_pte(vma, pmd, addr, entry, page);
 914			if (ret)
 915				goto out;
 916			pte = pte_offset_map(pmd, addr);
 
 
 917		}
 918	} while (pte++, addr += PAGE_SIZE, addr != end);
 919	pte_unmap(pte - 1);
 920out:
 921	return ret;
 922}
 923
 924static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 925				unsigned long addr, unsigned long end,
 926				swp_entry_t entry, struct page *page)
 927{
 928	pmd_t *pmd;
 929	unsigned long next;
 930	int ret;
 931
 932	pmd = pmd_offset(pud, addr);
 933	do {
 
 934		next = pmd_addr_end(addr, end);
 935		if (unlikely(pmd_trans_huge(*pmd)))
 936			continue;
 937		if (pmd_none_or_clear_bad(pmd))
 938			continue;
 939		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
 940		if (ret)
 941			return ret;
 942	} while (pmd++, addr = next, addr != end);
 943	return 0;
 944}
 945
 946static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 947				unsigned long addr, unsigned long end,
 948				swp_entry_t entry, struct page *page)
 949{
 950	pud_t *pud;
 951	unsigned long next;
 952	int ret;
 953
 954	pud = pud_offset(pgd, addr);
 955	do {
 956		next = pud_addr_end(addr, end);
 957		if (pud_none_or_clear_bad(pud))
 958			continue;
 959		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
 960		if (ret)
 961			return ret;
 962	} while (pud++, addr = next, addr != end);
 963	return 0;
 964}
 965
 966static int unuse_vma(struct vm_area_struct *vma,
 967				swp_entry_t entry, struct page *page)
 968{
 969	pgd_t *pgd;
 970	unsigned long addr, end, next;
 971	int ret;
 972
 973	if (page_anon_vma(page)) {
 974		addr = page_address_in_vma(page, vma);
 975		if (addr == -EFAULT)
 976			return 0;
 977		else
 978			end = addr + PAGE_SIZE;
 979	} else {
 980		addr = vma->vm_start;
 981		end = vma->vm_end;
 982	}
 983
 984	pgd = pgd_offset(vma->vm_mm, addr);
 985	do {
 986		next = pgd_addr_end(addr, end);
 987		if (pgd_none_or_clear_bad(pgd))
 988			continue;
 989		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
 990		if (ret)
 991			return ret;
 992	} while (pgd++, addr = next, addr != end);
 993	return 0;
 994}
 995
 996static int unuse_mm(struct mm_struct *mm,
 997				swp_entry_t entry, struct page *page)
 998{
 999	struct vm_area_struct *vma;
1000	int ret = 0;
 
1001
1002	if (!down_read_trylock(&mm->mmap_sem)) {
1003		/*
1004		 * Activate page so shrink_inactive_list is unlikely to unmap
1005		 * its ptes while lock is dropped, so swapoff can make progress.
1006		 */
1007		activate_page(page);
1008		unlock_page(page);
1009		down_read(&mm->mmap_sem);
1010		lock_page(page);
1011	}
1012	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1013		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1014			break;
1015	}
1016	up_read(&mm->mmap_sem);
1017	return (ret < 0)? ret: 0;
1018}
1019
1020/*
1021 * Scan swap_map from current position to next entry still in use.
1022 * Recycle to start on reaching the end, returning 0 when empty.
 
1023 */
1024static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1025					unsigned int prev)
1026{
1027	unsigned int max = si->max;
1028	unsigned int i = prev;
1029	unsigned char count;
1030
1031	/*
1032	 * No need for swap_lock here: we're just looking
1033	 * for whether an entry is in use, not modifying it; false
1034	 * hits are okay, and sys_swapoff() has already prevented new
1035	 * allocations from this area (while holding swap_lock).
1036	 */
1037	for (;;) {
1038		if (++i >= max) {
1039			if (!prev) {
1040				i = 0;
1041				break;
1042			}
1043			/*
1044			 * No entries in use at top of swap_map,
1045			 * loop back to start and recheck there.
1046			 */
1047			max = prev + 1;
1048			prev = 0;
1049			i = 1;
1050		}
1051		count = si->swap_map[i];
1052		if (count && swap_count(count) != SWAP_MAP_BAD)
1053			break;
 
 
1054	}
1055	return i;
1056}
1057
1058/*
1059 * We completely avoid races by reading each swap page in advance,
1060 * and then search for the process using it.  All the necessary
1061 * page table adjustments can then be made atomically.
1062 */
1063static int try_to_unuse(unsigned int type)
1064{
1065	struct swap_info_struct *si = swap_info[type];
1066	struct mm_struct *start_mm;
1067	unsigned char *swap_map;
1068	unsigned char swcount;
1069	struct page *page;
1070	swp_entry_t entry;
1071	unsigned int i = 0;
1072	int retval = 0;
1073
1074	/*
1075	 * When searching mms for an entry, a good strategy is to
1076	 * start at the first mm we freed the previous entry from
1077	 * (though actually we don't notice whether we or coincidence
1078	 * freed the entry).  Initialize this start_mm with a hold.
1079	 *
1080	 * A simpler strategy would be to start at the last mm we
1081	 * freed the previous entry from; but that would take less
1082	 * advantage of mmlist ordering, which clusters forked mms
1083	 * together, child after parent.  If we race with dup_mmap(), we
1084	 * prefer to resolve parent before child, lest we miss entries
1085	 * duplicated after we scanned child: using last mm would invert
1086	 * that.
1087	 */
1088	start_mm = &init_mm;
1089	atomic_inc(&init_mm.mm_users);
1090
1091	/*
1092	 * Keep on scanning until all entries have gone.  Usually,
1093	 * one pass through swap_map is enough, but not necessarily:
1094	 * there are races when an instance of an entry might be missed.
1095	 */
1096	while ((i = find_next_to_unuse(si, i)) != 0) {
1097		if (signal_pending(current)) {
1098			retval = -EINTR;
1099			break;
1100		}
1101
1102		/*
1103		 * Get a page for the entry, using the existing swap
1104		 * cache page if there is one.  Otherwise, get a clean
1105		 * page and read the swap into it.
1106		 */
1107		swap_map = &si->swap_map[i];
1108		entry = swp_entry(type, i);
1109		page = read_swap_cache_async(entry,
1110					GFP_HIGHUSER_MOVABLE, NULL, 0);
1111		if (!page) {
1112			/*
1113			 * Either swap_duplicate() failed because entry
1114			 * has been freed independently, and will not be
1115			 * reused since sys_swapoff() already disabled
1116			 * allocation from here, or alloc_page() failed.
1117			 */
1118			if (!*swap_map)
1119				continue;
1120			retval = -ENOMEM;
1121			break;
1122		}
1123
1124		/*
1125		 * Don't hold on to start_mm if it looks like exiting.
1126		 */
1127		if (atomic_read(&start_mm->mm_users) == 1) {
1128			mmput(start_mm);
1129			start_mm = &init_mm;
1130			atomic_inc(&init_mm.mm_users);
1131		}
1132
1133		/*
1134		 * Wait for and lock page.  When do_swap_page races with
1135		 * try_to_unuse, do_swap_page can handle the fault much
1136		 * faster than try_to_unuse can locate the entry.  This
1137		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1138		 * defer to do_swap_page in such a case - in some tests,
1139		 * do_swap_page and try_to_unuse repeatedly compete.
1140		 */
1141		wait_on_page_locked(page);
1142		wait_on_page_writeback(page);
1143		lock_page(page);
1144		wait_on_page_writeback(page);
1145
1146		/*
1147		 * Remove all references to entry.
1148		 */
1149		swcount = *swap_map;
1150		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1151			retval = shmem_unuse(entry, page);
1152			/* page has already been unlocked and released */
1153			if (retval < 0)
1154				break;
1155			continue;
1156		}
1157		if (swap_count(swcount) && start_mm != &init_mm)
1158			retval = unuse_mm(start_mm, entry, page);
1159
1160		if (swap_count(*swap_map)) {
1161			int set_start_mm = (*swap_map >= swcount);
1162			struct list_head *p = &start_mm->mmlist;
1163			struct mm_struct *new_start_mm = start_mm;
1164			struct mm_struct *prev_mm = start_mm;
1165			struct mm_struct *mm;
1166
1167			atomic_inc(&new_start_mm->mm_users);
1168			atomic_inc(&prev_mm->mm_users);
1169			spin_lock(&mmlist_lock);
1170			while (swap_count(*swap_map) && !retval &&
1171					(p = p->next) != &start_mm->mmlist) {
1172				mm = list_entry(p, struct mm_struct, mmlist);
1173				if (!atomic_inc_not_zero(&mm->mm_users))
1174					continue;
1175				spin_unlock(&mmlist_lock);
1176				mmput(prev_mm);
1177				prev_mm = mm;
1178
1179				cond_resched();
1180
1181				swcount = *swap_map;
1182				if (!swap_count(swcount)) /* any usage ? */
1183					;
1184				else if (mm == &init_mm)
1185					set_start_mm = 1;
1186				else
1187					retval = unuse_mm(mm, entry, page);
1188
1189				if (set_start_mm && *swap_map < swcount) {
1190					mmput(new_start_mm);
1191					atomic_inc(&mm->mm_users);
1192					new_start_mm = mm;
1193					set_start_mm = 0;
1194				}
1195				spin_lock(&mmlist_lock);
1196			}
1197			spin_unlock(&mmlist_lock);
1198			mmput(prev_mm);
1199			mmput(start_mm);
1200			start_mm = new_start_mm;
1201		}
1202		if (retval) {
1203			unlock_page(page);
1204			page_cache_release(page);
1205			break;
1206		}
1207
1208		/*
1209		 * If a reference remains (rare), we would like to leave
1210		 * the page in the swap cache; but try_to_unmap could
1211		 * then re-duplicate the entry once we drop page lock,
1212		 * so we might loop indefinitely; also, that page could
1213		 * not be swapped out to other storage meanwhile.  So:
1214		 * delete from cache even if there's another reference,
1215		 * after ensuring that the data has been saved to disk -
1216		 * since if the reference remains (rarer), it will be
1217		 * read from disk into another page.  Splitting into two
1218		 * pages would be incorrect if swap supported "shared
1219		 * private" pages, but they are handled by tmpfs files.
1220		 *
1221		 * Given how unuse_vma() targets one particular offset
1222		 * in an anon_vma, once the anon_vma has been determined,
1223		 * this splitting happens to be just what is needed to
1224		 * handle where KSM pages have been swapped out: re-reading
1225		 * is unnecessarily slow, but we can fix that later on.
1226		 */
1227		if (swap_count(*swap_map) &&
1228		     PageDirty(page) && PageSwapCache(page)) {
1229			struct writeback_control wbc = {
1230				.sync_mode = WB_SYNC_NONE,
1231			};
1232
1233			swap_writepage(page, &wbc);
1234			lock_page(page);
1235			wait_on_page_writeback(page);
1236		}
1237
1238		/*
1239		 * It is conceivable that a racing task removed this page from
1240		 * swap cache just before we acquired the page lock at the top,
1241		 * or while we dropped it in unuse_mm().  The page might even
1242		 * be back in swap cache on another swap area: that we must not
1243		 * delete, since it may not have been written out to swap yet.
1244		 */
1245		if (PageSwapCache(page) &&
1246		    likely(page_private(page) == entry.val))
1247			delete_from_swap_cache(page);
1248
1249		/*
1250		 * So we could skip searching mms once swap count went
1251		 * to 1, we did not mark any present ptes as dirty: must
1252		 * mark page dirty so shrink_page_list will preserve it.
1253		 */
1254		SetPageDirty(page);
1255		unlock_page(page);
1256		page_cache_release(page);
1257
1258		/*
1259		 * Make sure that we aren't completely killing
1260		 * interactive performance.
1261		 */
1262		cond_resched();
1263	}
1264
1265	mmput(start_mm);
1266	return retval;
1267}
1268
1269/*
1270 * After a successful try_to_unuse, if no swap is now in use, we know
1271 * we can empty the mmlist.  swap_lock must be held on entry and exit.
1272 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1273 * added to the mmlist just after page_duplicate - before would be racy.
1274 */
1275static void drain_mmlist(void)
1276{
1277	struct list_head *p, *next;
1278	unsigned int type;
1279
1280	for (type = 0; type < nr_swapfiles; type++)
1281		if (swap_info[type]->inuse_pages)
1282			return;
1283	spin_lock(&mmlist_lock);
1284	list_for_each_safe(p, next, &init_mm.mmlist)
1285		list_del_init(p);
1286	spin_unlock(&mmlist_lock);
1287}
1288
1289/*
1290 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1291 * corresponds to page offset for the specified swap entry.
1292 * Note that the type of this function is sector_t, but it returns page offset
1293 * into the bdev, not sector offset.
1294 */
1295static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1296{
1297	struct swap_info_struct *sis;
1298	struct swap_extent *start_se;
1299	struct swap_extent *se;
1300	pgoff_t offset;
1301
1302	sis = swap_info[swp_type(entry)];
1303	*bdev = sis->bdev;
1304
1305	offset = swp_offset(entry);
1306	start_se = sis->curr_swap_extent;
1307	se = start_se;
1308
1309	for ( ; ; ) {
1310		struct list_head *lh;
1311
1312		if (se->start_page <= offset &&
1313				offset < (se->start_page + se->nr_pages)) {
1314			return se->start_block + (offset - se->start_page);
1315		}
1316		lh = se->list.next;
1317		se = list_entry(lh, struct swap_extent, list);
1318		sis->curr_swap_extent = se;
1319		BUG_ON(se == start_se);		/* It *must* be present */
1320	}
1321}
1322
1323/*
1324 * Returns the page offset into bdev for the specified page's swap entry.
1325 */
1326sector_t map_swap_page(struct page *page, struct block_device **bdev)
1327{
1328	swp_entry_t entry;
1329	entry.val = page_private(page);
1330	return map_swap_entry(entry, bdev);
1331}
1332
1333/*
1334 * Free all of a swapdev's extent information
1335 */
1336static void destroy_swap_extents(struct swap_info_struct *sis)
1337{
1338	while (!list_empty(&sis->first_swap_extent.list)) {
1339		struct swap_extent *se;
 
1340
1341		se = list_entry(sis->first_swap_extent.list.next,
1342				struct swap_extent, list);
1343		list_del(&se->list);
1344		kfree(se);
1345	}
1346}
1347
1348/*
1349 * Add a block range (and the corresponding page range) into this swapdev's
1350 * extent list.  The extent list is kept sorted in page order.
1351 *
1352 * This function rather assumes that it is called in ascending page order.
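      *
      * Returns 1 if a new extent was added, 0 if the range was merged into
      * the previous extent, or -ENOMEM on allocation failure.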
1353 */
1354static int
1355add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1356		unsigned long nr_pages, sector_t start_block)
1357{
 
1358	struct swap_extent *se;
1359	struct swap_extent *new_se;
1360	struct list_head *lh;
1361
1362	if (start_page == 0) {
1363		se = &sis->first_swap_extent;
1364		sis->curr_swap_extent = se;
1365		se->start_page = 0;
1366		se->nr_pages = nr_pages;
1367		se->start_block = start_block;
1368		return 1;
1369	} else {
1370		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
1371		se = list_entry(lh, struct swap_extent, list);
 
1372		BUG_ON(se->start_page + se->nr_pages != start_page);
1373		if (se->start_block + se->nr_pages == start_block) {
1374			/* Merge it */
1375			se->nr_pages += nr_pages;
1376			return 0;
1377		}
1378	}
1379
1380	/*
1381	 * No merge.  Insert a new extent, preserving ordering.
1382	 */
1383	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1384	if (new_se == NULL)
1385		return -ENOMEM;
1386	new_se->start_page = start_page;
1387	new_se->nr_pages = nr_pages;
1388	new_se->start_block = start_block;
1389
1390	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
 
1391	return 1;
1392}
 
1393
1394/*
1395 * A `swap extent' is a simple thing which maps a contiguous range of pages
1396 * onto a contiguous range of disk blocks.  An ordered list of swap extents
1397 * is built at swapon time and is then used at swap_writepage/swap_readpage
1398 * time for locating where on disk a page belongs.
1399 *
1400 * If the swapfile is an S_ISBLK block device, a single extent is installed.
1401 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1402 * swap files identically.
1403 *
1404 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1405 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1406 * swapfiles are handled *identically* after swapon time.
1407 *
1408 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1409 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1410 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1411 * requirements, they are simply tossed out - we will never use those blocks
1412 * for swapping.
1413 *
1414 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1415 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1416 * which will scribble on the fs.
1417 *
1418 * The amount of disk space which a single swap extent represents varies.
1419 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1420 * extents in the list.  To avoid much list walking, we cache the previous
1421 * search location in `curr_swap_extent', and start new searches from there.
1422 * This is extremely effective.  The average number of iterations in
1423 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1424 */
1425static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1426{
1427	struct inode *inode;
1428	unsigned blocks_per_page;
1429	unsigned long page_no;
1430	unsigned blkbits;
1431	sector_t probe_block;
1432	sector_t last_block;
1433	sector_t lowest_block = -1;
1434	sector_t highest_block = 0;
1435	int nr_extents = 0;
1436	int ret;
1437
1438	inode = sis->swap_file->f_mapping->host;
1439	if (S_ISBLK(inode->i_mode)) {
1440		ret = add_swap_extent(sis, 0, sis->max, 0);
1441		*span = sis->pages;
1442		goto out;
1443	}
1444
1445	blkbits = inode->i_blkbits;
1446	blocks_per_page = PAGE_SIZE >> blkbits;
1447
1448	/*
1449	 * Map all the blocks into the extent list.  This code doesn't try
1450	 * to be very smart.
1451	 */
1452	probe_block = 0;
1453	page_no = 0;
1454	last_block = i_size_read(inode) >> blkbits;
1455	while ((probe_block + blocks_per_page) <= last_block &&
1456			page_no < sis->max) {
1457		unsigned block_in_page;
1458		sector_t first_block;
1459
1460		first_block = bmap(inode, probe_block);
1461		if (first_block == 0)
1462			goto bad_bmap;
1463
1464		/*
1465		 * It must be PAGE_SIZE aligned on-disk
1466		 */
1467		if (first_block & (blocks_per_page - 1)) {
1468			probe_block++;
1469			goto reprobe;
1470		}
1471
1472		for (block_in_page = 1; block_in_page < blocks_per_page;
1473					block_in_page++) {
1474			sector_t block;
1475
1476			block = bmap(inode, probe_block + block_in_page);
1477			if (block == 0)
1478				goto bad_bmap;
1479			if (block != first_block + block_in_page) {
1480				/* Discontiguity */
1481				probe_block++;
1482				goto reprobe;
1483			}
1484		}
1485
1486		first_block >>= (PAGE_SHIFT - blkbits);
1487		if (page_no) {	/* exclude the header page */
1488			if (first_block < lowest_block)
1489				lowest_block = first_block;
1490			if (first_block > highest_block)
1491				highest_block = first_block;
1492		}
1493
1494		/*
1495		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
1496		 */
1497		ret = add_swap_extent(sis, page_no, 1, first_block);
1498		if (ret < 0)
1499			goto out;
1500		nr_extents += ret;
1501		page_no++;
1502		probe_block += blocks_per_page;
1503reprobe:
1504		continue;
1505	}
1506	ret = nr_extents;
1507	*span = 1 + highest_block - lowest_block;
1508	if (page_no == 0)
1509		page_no = 1;	/* force Empty message */
1510	sis->max = page_no;
1511	sis->pages = page_no - 1;
1512	sis->highest_bit = page_no - 1;
1513out:
1514	return ret;
1515bad_bmap:
1516	printk(KERN_ERR "swapon: swapfile has holes\n");
1517	ret = -EINVAL;
1518	goto out;
1519}
1520
1521static void enable_swap_info(struct swap_info_struct *p, int prio,
1522				unsigned char *swap_map)
 
1523{
1524	int i, prev;
1525
1526	spin_lock(&swap_lock);
1527	if (prio >= 0)
1528		p->prio = prio;
1529	else
1530		p->prio = --least_priority;
1531	p->swap_map = swap_map;
1532	p->flags |= SWP_WRITEOK;
1533	nr_swap_pages += p->pages;
1534	total_swap_pages += p->pages;
1535
1536	/* insert swap space into swap_list: */
1537	prev = -1;
1538	for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1539		if (p->prio >= swap_info[i]->prio)
1540			break;
1541		prev = i;
1542	}
1543	p->next = i;
1544	if (prev < 0)
1545		swap_list.head = swap_list.next = p->type;
1546	else
1547		swap_info[prev]->next = p->type;
1548	spin_unlock(&swap_lock);
1549}
1550
1551SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1552{
1553	struct swap_info_struct *p = NULL;
1554	unsigned char *swap_map;
 
1555	struct file *swap_file, *victim;
1556	struct address_space *mapping;
1557	struct inode *inode;
1558	char *pathname;
1559	int oom_score_adj;
1560	int i, type, prev;
1561	int err;
1562
1563	if (!capable(CAP_SYS_ADMIN))
1564		return -EPERM;
1565
 
 
1566	pathname = getname(specialfile);
1567	err = PTR_ERR(pathname);
1568	if (IS_ERR(pathname))
1569		goto out;
1570
1571	victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
1572	putname(pathname);
1573	err = PTR_ERR(victim);
1574	if (IS_ERR(victim))
1575		goto out;
1576
1577	mapping = victim->f_mapping;
1578	prev = -1;
1579	spin_lock(&swap_lock);
1580	for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1581		p = swap_info[type];
1582		if (p->flags & SWP_WRITEOK) {
1583			if (p->swap_file->f_mapping == mapping)
 
1584				break;
 
1585		}
1586		prev = type;
1587	}
1588	if (type < 0) {
1589		err = -EINVAL;
1590		spin_unlock(&swap_lock);
1591		goto out_dput;
1592	}
1593	if (!security_vm_enough_memory(p->pages))
1594		vm_unacct_memory(p->pages);
1595	else {
1596		err = -ENOMEM;
1597		spin_unlock(&swap_lock);
1598		goto out_dput;
1599	}
1600	if (prev < 0)
1601		swap_list.head = p->next;
1602	else
1603		swap_info[prev]->next = p->next;
1604	if (type == swap_list.next) {
1605		/* just pick something that's safe... */
1606		swap_list.next = swap_list.head;
1607	}
1608	if (p->prio < 0) {
1609		for (i = p->next; i >= 0; i = swap_info[i]->next)
1610			swap_info[i]->prio = p->prio--;
1611		least_priority++;
1612	}
1613	nr_swap_pages -= p->pages;
 
1614	total_swap_pages -= p->pages;
1615	p->flags &= ~SWP_WRITEOK;
 
1616	spin_unlock(&swap_lock);
1617
1618	oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1619	err = try_to_unuse(type);
1620	test_set_oom_score_adj(oom_score_adj);
 
 
1621
1622	if (err) {
1623		/*
1624		 * reading p->prio and p->swap_map outside the lock is
1625		 * safe here because only sys_swapon and sys_swapoff
1626		 * change them, and there can be no other sys_swapon or
1627		 * sys_swapoff for this swap_info_struct at this point.
1628		 */
1629		/* re-insert swap space back into swap_list */
1630		enable_swap_info(p, p->prio, p->swap_map);
 
1631		goto out_dput;
1632	}
1633
1634	destroy_swap_extents(p);
1635	if (p->flags & SWP_CONTINUED)
1636		free_swap_count_continuations(p);
1637
1638	mutex_lock(&swapon_mutex);
1639	spin_lock(&swap_lock);
 
1640	drain_mmlist();
1641
1642	/* wait for anyone still in scan_swap_map */
1643	p->highest_bit = 0;		/* cuts scans short */
1644	while (p->flags >= SWP_SCANNING) {
 
1645		spin_unlock(&swap_lock);
1646		schedule_timeout_uninterruptible(1);
1647		spin_lock(&swap_lock);
 
1648	}
1649
1650	swap_file = p->swap_file;
 
1651	p->swap_file = NULL;
1652	p->max = 0;
1653	swap_map = p->swap_map;
1654	p->swap_map = NULL;
1655	p->flags = 0;
 
 
1656	spin_unlock(&swap_lock);
 
 
1657	mutex_unlock(&swapon_mutex);
1658	vfree(swap_map);
1659	/* Destroy swap account information */
1660	swap_cgroup_swapoff(type);
 
 
1661
1662	inode = mapping->host;
1663	if (S_ISBLK(inode->i_mode)) {
1664		struct block_device *bdev = I_BDEV(inode);
1665		set_blocksize(bdev, p->old_block_size);
1666		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1667	} else {
1668		mutex_lock(&inode->i_mutex);
1669		inode->i_flags &= ~S_SWAPFILE;
1670		mutex_unlock(&inode->i_mutex);
1671	}
1672	filp_close(swap_file, NULL);
1673	err = 0;
1674	atomic_inc(&proc_poll_event);
1675	wake_up_interruptible(&proc_poll_wait);
1676
1677out_dput:
1678	filp_close(victim, NULL);
1679out:
 
1680	return err;
1681}
1682
1683#ifdef CONFIG_PROC_FS
1684static unsigned swaps_poll(struct file *file, poll_table *wait)
1685{
1686	struct seq_file *seq = file->private_data;
1687
1688	poll_wait(file, &proc_poll_wait, wait);
1689
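     	/*
     	 * A swapon or swapoff has occurred since this file was opened or
     	 * last polled: report an exceptional event so /proc/swaps readers
     	 * refresh their view.
     	 */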
1690	if (seq->poll_event != atomic_read(&proc_poll_event)) {
1691		seq->poll_event = atomic_read(&proc_poll_event);
1692		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
1693	}
1694
1695	return POLLIN | POLLRDNORM;
1696}
1697
1698/* iterator */
1699static void *swap_start(struct seq_file *swap, loff_t *pos)
1700{
1701	struct swap_info_struct *si;
1702	int type;
1703	loff_t l = *pos;
1704
1705	mutex_lock(&swapon_mutex);
1706
1707	if (!l)
1708		return SEQ_START_TOKEN;
1709
1710	for (type = 0; type < nr_swapfiles; type++) {
1711		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
1712		si = swap_info[type];
1713		if (!(si->flags & SWP_USED) || !si->swap_map)
1714			continue;
1715		if (!--l)
1716			return si;
1717	}
1718
1719	return NULL;
1720}
1721
1722static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1723{
1724	struct swap_info_struct *si = v;
1725	int type;
1726
1727	if (v == SEQ_START_TOKEN)
1728		type = 0;
1729	else
1730		type = si->type + 1;
1731
1732	for (; type < nr_swapfiles; type++) {
1733		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
1734		si = swap_info[type];
1735		if (!(si->flags & SWP_USED) || !si->swap_map)
1736			continue;
1737		++*pos;
1738		return si;
1739	}
1740
1741	return NULL;
1742}
1743
1744static void swap_stop(struct seq_file *swap, void *v)
1745{
1746	mutex_unlock(&swapon_mutex);
1747}
1748
1749static int swap_show(struct seq_file *swap, void *v)
1750{
1751	struct swap_info_struct *si = v;
1752	struct file *file;
1753	int len;
 
1754
1755	if (si == SEQ_START_TOKEN) {
1756		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1757		return 0;
1758	}
1759
1760	file = si->swap_file;
1761	len = seq_path(swap, &file->f_path, " \t\n\\");
1762	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1763			len < 40 ? 40 - len : 1, " ",
1764			S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1765				"partition" : "file\t",
1766			si->pages << (PAGE_SHIFT - 10),
1767			si->inuse_pages << (PAGE_SHIFT - 10),
1768			si->prio);
1769	return 0;
1770}
1771
1772static const struct seq_operations swaps_op = {
1773	.start =	swap_start,
1774	.next =		swap_next,
1775	.stop =		swap_stop,
1776	.show =		swap_show
1777};
1778
1779static int swaps_open(struct inode *inode, struct file *file)
1780{
1781	struct seq_file *seq;
1782	int ret;
1783
1784	ret = seq_open(file, &swaps_op);
1785	if (ret)
1786		return ret;
1787
1788	seq = file->private_data;
1789	seq->poll_event = atomic_read(&proc_poll_event);
1790	return 0;
1791}
1792
1793static const struct file_operations proc_swaps_operations = {
1794	.open		= swaps_open,
1795	.read		= seq_read,
1796	.llseek		= seq_lseek,
1797	.release	= seq_release,
1798	.poll		= swaps_poll,
 
1799};
1800
1801static int __init procswaps_init(void)
1802{
1803	proc_create("swaps", 0, NULL, &proc_swaps_operations);
1804	return 0;
1805}
1806__initcall(procswaps_init);
1807#endif /* CONFIG_PROC_FS */
1808
1809#ifdef MAX_SWAPFILES_CHECK
1810static int __init max_swapfiles_check(void)
1811{
1812	MAX_SWAPFILES_CHECK();
1813	return 0;
1814}
1815late_initcall(max_swapfiles_check);
1816#endif
1817
1818static struct swap_info_struct *alloc_swap_info(void)
1819{
1820	struct swap_info_struct *p;
 
1821	unsigned int type;
 
1822
1823	p = kzalloc(sizeof(*p), GFP_KERNEL);
1824	if (!p)
1825		return ERR_PTR(-ENOMEM);
1826
1827	spin_lock(&swap_lock);
1828	for (type = 0; type < nr_swapfiles; type++) {
1829		if (!(swap_info[type]->flags & SWP_USED))
1830			break;
1831	}
1832	if (type >= MAX_SWAPFILES) {
1833		spin_unlock(&swap_lock);
1834		kfree(p);
 
1835		return ERR_PTR(-EPERM);
1836	}
1837	if (type >= nr_swapfiles) {
1838		p->type = type;
1839		swap_info[type] = p;
1840		/*
1841		 * Write swap_info[type] before nr_swapfiles, in case a
1842		 * racing procfs swap_start() or swap_next() is reading them.
1843		 * (We never shrink nr_swapfiles, we never free this entry.)
1844		 */
1845		smp_wmb();
1846		nr_swapfiles++;
1847	} else {
1848		kfree(p);
1849		p = swap_info[type];
1850		/*
1851		 * Do not memset this entry: a racing procfs swap_next()
1852		 * would be relying on p->type to remain valid.
1853		 */
1854	}
1855	INIT_LIST_HEAD(&p->first_swap_extent.list);
 
 
 
1856	p->flags = SWP_USED;
1857	p->next = -1;
1858	spin_unlock(&swap_lock);
1859
1860	return p;
1861}
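/*
 * Illustrative sketch, not part of the original file: the publication
 * pattern used above.  The writer stores the new swap_info pointer, then
 * issues smp_wmb(), then bumps nr_swapfiles; readers such as swap_start()
 * and swap_next() load nr_swapfiles, then issue smp_rmb(), then look at
 * swap_info[type].  Demo names (slot, nr_items, use) are assumptions.
 */
#if 0	/* demo only */
	/* writer side */
	slot[n] = new_item;	/* 1: initialize and publish the entry */
	smp_wmb();		/* 2: order the store before the counter */
	nr_items = n + 1;	/* 3: make the entry reachable */

	/* reader side */
	count = nr_items;	/* 1: snapshot the counter */
	smp_rmb();		/* 2: order later loads after the counter */
	for (i = 0; i < count; i++)
		use(slot[i]);	/* 3: entries below count are initialized */
#endif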
1862
1863static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
1864{
1865	int error;
1866
1867	if (S_ISBLK(inode->i_mode)) {
1868		p->bdev = bdgrab(I_BDEV(inode));
1869		error = blkdev_get(p->bdev,
1870				   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1871				   sys_swapon);
1872		if (error < 0) {
1873			p->bdev = NULL;
1874			return -EINVAL;
1875		}
 
1876		p->old_block_size = block_size(p->bdev);
1877		error = set_blocksize(p->bdev, PAGE_SIZE);
1878		if (error < 0)
1879			return error;
1880		p->flags |= SWP_BLKDEV;
1881	} else if (S_ISREG(inode->i_mode)) {
1882		p->bdev = inode->i_sb->s_bdev;
1883		mutex_lock(&inode->i_mutex);
1884		if (IS_SWAPFILE(inode))
1885			return -EBUSY;
1886	} else
1887		return -EINVAL;
1888
1889	return 0;
1890}
1891
1892static unsigned long read_swap_header(struct swap_info_struct *p,
1893					union swap_header *swap_header,
1894					struct inode *inode)
1895{
1896	int i;
1897	unsigned long maxpages;
1898	unsigned long swapfilepages;
 
1899
1900	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1901		printk(KERN_ERR "Unable to find swap-space signature\n");
1902		return 0;
1903	}
1904
1905	/* swap partition endianness hack... */
1906	if (swab32(swap_header->info.version) == 1) {
1907		swab32s(&swap_header->info.version);
1908		swab32s(&swap_header->info.last_page);
1909		swab32s(&swap_header->info.nr_badpages);
 
 
1910		for (i = 0; i < swap_header->info.nr_badpages; i++)
1911			swab32s(&swap_header->info.badpages[i]);
1912	}
1913	/* Check the swap header's sub-version */
1914	if (swap_header->info.version != 1) {
1915		printk(KERN_WARNING
1916		       "Unable to handle swap header version %d\n",
1917		       swap_header->info.version);
1918		return 0;
1919	}
1920
1921	p->lowest_bit  = 1;
1922	p->cluster_next = 1;
1923	p->cluster_nr = 0;
1924
1925	/*
1926	 * Find out how many pages are allowed for a single swap
1927	 * device. There are three limiting factors: 1) the number
1928	 * of bits for the swap offset in the swp_entry_t type, and
1929	 * 2) the number of bits in the swap pte as defined by the
1930	 * different architectures, and 3) the number of free bits
1931	 * in an exceptional radix_tree entry. In order to find the
1932	 * largest possible bit mask, a swap entry with swap type 0
1933	 * and swap offset ~0UL is created, encoded to a swap pte,
1934	 * decoded to a swp_entry_t again, and finally the swap
1935	 * offset is extracted. This will mask all the bits from
1936	 * the initial ~0UL mask that can't be encoded in either
1937	 * the swp_entry_t or the architecture definition of a
1938	 * swap pte.  Then the same is done for a radix_tree entry.
1939	 */
1940	maxpages = swp_offset(pte_to_swp_entry(
1941			swp_entry_to_pte(swp_entry(0, ~0UL))));
1942	maxpages = swp_offset(radix_to_swp_entry(
1943			swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
1944
1945	if (maxpages > swap_header->info.last_page) {
1946		maxpages = swap_header->info.last_page + 1;
1947		/* p->max is an unsigned int: don't overflow it */
1948		if ((unsigned int)maxpages == 0)
1949			maxpages = UINT_MAX;
1950	}
1951	p->highest_bit = maxpages - 1;
1952
1953	if (!maxpages)
1954		return 0;
1955	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1956	if (swapfilepages && maxpages > swapfilepages) {
1957		printk(KERN_WARNING
1958		       "Swap area shorter than signature indicates\n");
1959		return 0;
1960	}
1961	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1962		return 0;
1963	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1964		return 0;
1965
1966	return maxpages;
1967}
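/*
 * Illustrative sketch, not part of the original file: the "encode ~0UL and
 * see what survives" trick above, modelled in plain C.  The real code round
 * trips through the architecture's swap pte and through a radix-tree
 * exceptional entry; the demo just masks to a made-up number of offset bits.
 */
#if 0	/* standalone userspace demo */
#include <stdio.h>

#define DEMO_OFFSET_BITS	27	/* assumption for the demo only */

static unsigned long demo_pte_roundtrip(unsigned long offset)
{
	/* encode + decode == keep only the bits the format can store */
	return offset & ((1UL << DEMO_OFFSET_BITS) - 1);
}

int main(void)
{
	unsigned long maxpages = demo_pte_roundtrip(~0UL) + 1;

	printf("largest offset that survives + 1 = %lu pages\n", maxpages);
	return 0;
}
#endif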
1968
1969static int setup_swap_map_and_extents(struct swap_info_struct *p,
1970					union swap_header *swap_header,
1971					unsigned char *swap_map,
 
1972					unsigned long maxpages,
1973					sector_t *span)
1974{
1975	int i;
1976	unsigned int nr_good_pages;
1977	int nr_extents;
 
 
 
1978
1979	nr_good_pages = maxpages - 1;	/* omit header page */
1980
 
 
 
1981	for (i = 0; i < swap_header->info.nr_badpages; i++) {
1982		unsigned int page_nr = swap_header->info.badpages[i];
1983		if (page_nr == 0 || page_nr > swap_header->info.last_page)
1984			return -EINVAL;
1985		if (page_nr < maxpages) {
1986			swap_map[page_nr] = SWAP_MAP_BAD;
1987			nr_good_pages--;
1988		}
1989	}
1990
1991	if (nr_good_pages) {
1992		swap_map[0] = SWAP_MAP_BAD;
1993		p->max = maxpages;
1994		p->pages = nr_good_pages;
1995		nr_extents = setup_swap_extents(p, span);
1996		if (nr_extents < 0)
1997			return nr_extents;
1998		nr_good_pages = p->pages;
1999	}
2000	if (!nr_good_pages) {
2001		printk(KERN_WARNING "Empty swap-file\n");
2002		return -EINVAL;
2003	}
2004
2005	return nr_extents;
2006}
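/*
 * Worked example for the accounting above (numbers invented): with
 * maxpages == 1024 and two bad pages listed inside the area, the header
 * page is dropped first (1024 - 1 = 1023), each in-range bad page is
 * marked SWAP_MAP_BAD and subtracted (1023 - 2 = 1021 good pages), and
 * finally slot 0 is marked SWAP_MAP_BAD so the signature page itself is
 * never handed out.
 */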
2007
2008SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2009{
2010	struct swap_info_struct *p;
2011	char *name;
2012	struct file *swap_file = NULL;
2013	struct address_space *mapping;
2014	int i;
2015	int prio;
2016	int error;
2017	union swap_header *swap_header;
2018	int nr_extents;
2019	sector_t span;
2020	unsigned long maxpages;
2021	unsigned char *swap_map = NULL;
 
2022	struct page *page = NULL;
2023	struct inode *inode = NULL;
2024
2025	if (!capable(CAP_SYS_ADMIN))
2026		return -EPERM;
2027
 
 
 
2028	p = alloc_swap_info();
2029	if (IS_ERR(p))
2030		return PTR_ERR(p);
2031
 
 
2032	name = getname(specialfile);
2033	if (IS_ERR(name)) {
2034		error = PTR_ERR(name);
2035		name = NULL;
2036		goto bad_swap;
2037	}
2038	swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
2039	if (IS_ERR(swap_file)) {
2040		error = PTR_ERR(swap_file);
2041		swap_file = NULL;
2042		goto bad_swap;
2043	}
2044
2045	p->swap_file = swap_file;
2046	mapping = swap_file->f_mapping;
2047
2048	for (i = 0; i < nr_swapfiles; i++) {
2049		struct swap_info_struct *q = swap_info[i];
2050
2051		if (q == p || !q->swap_file)
2052			continue;
2053		if (mapping == q->swap_file->f_mapping) {
2054			error = -EBUSY;
2055			goto bad_swap;
2056		}
2057	}
2058
2059	inode = mapping->host;
2060	/* If S_ISREG(inode->i_mode), claim_swapfile() will do mutex_lock(&inode->i_mutex) */
2061	error = claim_swapfile(p, inode);
2062	if (unlikely(error))
2063		goto bad_swap;
2064
2065	/*
2066	 * Read the swap header.
2067	 */
2068	if (!mapping->a_ops->readpage) {
2069		error = -EINVAL;
2070		goto bad_swap;
2071	}
2072	page = read_mapping_page(mapping, 0, swap_file);
2073	if (IS_ERR(page)) {
2074		error = PTR_ERR(page);
2075		goto bad_swap;
2076	}
2077	swap_header = kmap(page);
2078
2079	maxpages = read_swap_header(p, swap_header, inode);
2080	if (unlikely(!maxpages)) {
2081		error = -EINVAL;
2082		goto bad_swap;
2083	}
2084
2085	/* OK, set up the swap map and apply the bad block list */
2086	swap_map = vzalloc(maxpages);
2087	if (!swap_map) {
2088		error = -ENOMEM;
2089		goto bad_swap;
2090	}
2091
2092	error = swap_cgroup_swapon(p->type, maxpages);
2093	if (error)
2094		goto bad_swap;
2095
2096	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2097		maxpages, &span);
2098	if (unlikely(nr_extents < 0)) {
2099		error = nr_extents;
2100		goto bad_swap;
2101	}
2102
2103	if (p->bdev) {
2104		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2105			p->flags |= SWP_SOLIDSTATE;
2106			p->cluster_next = 1 + (random32() % p->highest_bit);
2107		}
2108		if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
2109			p->flags |= SWP_DISCARDABLE;
2110	}
2111
2112	mutex_lock(&swapon_mutex);
2113	prio = -1;
2114	if (swap_flags & SWAP_FLAG_PREFER)
2115		prio =
2116		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2117	enable_swap_info(p, prio, swap_map);
2118
2119	printk(KERN_INFO "Adding %uk swap on %s.  "
2120			"Priority:%d extents:%d across:%lluk %s%s\n",
2121		p->pages<<(PAGE_SHIFT-10), name, p->prio,
2122		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2123		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2124		(p->flags & SWP_DISCARDABLE) ? "D" : "");
 
 
2125
2126	mutex_unlock(&swapon_mutex);
2127	atomic_inc(&proc_poll_event);
2128	wake_up_interruptible(&proc_poll_wait);
2129
2130	if (S_ISREG(inode->i_mode))
2131		inode->i_flags |= S_SWAPFILE;
2132	error = 0;
2133	goto out;
2134bad_swap:
2135	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2136		set_blocksize(p->bdev, p->old_block_size);
2137		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 
2138	}
 
2139	destroy_swap_extents(p);
2140	swap_cgroup_swapoff(p->type);
2141	spin_lock(&swap_lock);
2142	p->swap_file = NULL;
2143	p->flags = 0;
2144	spin_unlock(&swap_lock);
2145	vfree(swap_map);
2146	if (swap_file) {
2147		if (inode && S_ISREG(inode->i_mode)) {
2148			mutex_unlock(&inode->i_mutex);
2149			inode = NULL;
2150		}
2151		filp_close(swap_file, NULL);
2152	}
2153out:
2154	if (page && !IS_ERR(page)) {
2155		kunmap(page);
2156		page_cache_release(page);
2157	}
2158	if (name)
2159		putname(name);
2160	if (inode && S_ISREG(inode->i_mode))
2161		mutex_unlock(&inode->i_mutex);
 
 
2162	return error;
2163}
2164
2165void si_swapinfo(struct sysinfo *val)
2166{
2167	unsigned int type;
2168	unsigned long nr_to_be_unused = 0;
2169
2170	spin_lock(&swap_lock);
2171	for (type = 0; type < nr_swapfiles; type++) {
2172		struct swap_info_struct *si = swap_info[type];
2173
2174		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2175			nr_to_be_unused += si->inuse_pages;
2176	}
2177	val->freeswap = nr_swap_pages + nr_to_be_unused;
2178	val->totalswap = total_swap_pages + nr_to_be_unused;
2179	spin_unlock(&swap_lock);
2180}
2181
2182/*
2183 * Verify that a swap entry is valid and increment its swap map count.
2184 *
2185 * Returns error code in following case.
2186 * - success -> 0
2187 * - swp_entry is invalid -> EINVAL
2188 * - swp_entry is migration entry -> EINVAL
2189 * - swap-cache reference is requested but there is already one. -> EEXIST
2190 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2191 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2192 */
2193static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2194{
2195	struct swap_info_struct *p;
2196	unsigned long offset, type;
 
2197	unsigned char count;
2198	unsigned char has_cache;
2199	int err = -EINVAL;
2200
2201	if (non_swap_entry(entry))
2202		goto out;
2203
2204	type = swp_type(entry);
2205	if (type >= nr_swapfiles)
2206		goto bad_file;
2207	p = swap_info[type];
2208	offset = swp_offset(entry);
 
2209
2210	spin_lock(&swap_lock);
2211	if (unlikely(offset >= p->max))
2212		goto unlock_out;
 
2213
2214	count = p->swap_map[offset];
2215	has_cache = count & SWAP_HAS_CACHE;
2216	count &= ~SWAP_HAS_CACHE;
2217	err = 0;
2218
2219	if (usage == SWAP_HAS_CACHE) {
2220
2221		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
2222		if (!has_cache && count)
2223			has_cache = SWAP_HAS_CACHE;
2224		else if (has_cache)		/* someone else added cache */
2225			err = -EEXIST;
2226		else				/* no users remaining */
2227			err = -ENOENT;
2228
2229	} else if (count || has_cache) {
2230
2231		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2232			count += usage;
2233		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2234			err = -EINVAL;
2235		else if (swap_count_continued(p, offset, count))
2236			count = COUNT_CONTINUED;
2237		else
2238			err = -ENOMEM;
2239	} else
2240		err = -ENOENT;			/* unused swap entry */
2241
2242	p->swap_map[offset] = count | has_cache;
2243
2244unlock_out:
2245	spin_unlock(&swap_lock);
2246out:
2247	return err;
2248
2249bad_file:
2250	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2251	goto out;
2252}
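/*
 * Worked example of the swap_map byte handled above (values invented):
 * a slot with two pte references and a page in swap cache holds
 * 2 | SWAP_HAS_CACHE.  __swap_duplicate(entry, 1) strips the cache bit,
 * sees count == 2 < SWAP_MAP_MAX, and stores back 3 | SWAP_HAS_CACHE;
 * __swap_duplicate(entry, SWAP_HAS_CACHE) on the same slot would instead
 * fail with -EEXIST because the cache bit is already set.
 */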
2253
2254/*
2255 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2256 * (in which case its reference count is never incremented).
2257 */
2258void swap_shmem_alloc(swp_entry_t entry)
2259{
2260	__swap_duplicate(entry, SWAP_MAP_SHMEM);
2261}
2262
2263/*
2264 * Increase reference count of swap entry by 1.
2265 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2266 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
2267 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2268 * might occur if a page table entry has got corrupted.
2269 */
2270int swap_duplicate(swp_entry_t entry)
2271{
2272	int err = 0;
2273
2274	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2275		err = add_swap_count_continuation(entry, GFP_ATOMIC);
2276	return err;
2277}
2278
2279/*
2280 * @entry: swap entry for which we allocate swap cache.
2281 *
2282 * Called when allocating swap cache for an existing swap entry.
2283 * Returns 0 on success, or an error code:
2284 * -EBUSY means there is already a swap cache.
2285 * Note: return code is different from swap_duplicate().
2286 */
2287int swapcache_prepare(swp_entry_t entry)
2288{
2289	return __swap_duplicate(entry, SWAP_HAS_CACHE);
2290}
2291
2292/*
2293 * swap_lock prevents swap_map being freed. Don't grab an extra
2294 * reference on the swaphandle, it doesn't matter if it becomes unused.
2295 */
2296int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2297{
2298	struct swap_info_struct *si;
2299	int our_page_cluster = page_cluster;
2300	pgoff_t target, toff;
2301	pgoff_t base, end;
2302	int nr_pages = 0;
2303
2304	if (!our_page_cluster)	/* no readahead */
2305		return 0;
2306
2307	si = swap_info[swp_type(entry)];
2308	target = swp_offset(entry);
2309	base = (target >> our_page_cluster) << our_page_cluster;
2310	end = base + (1 << our_page_cluster);
2311	if (!base)		/* first page is swap header */
2312		base++;
2313
2314	spin_lock(&swap_lock);
2315	if (end > si->max)	/* don't go beyond end of map */
2316		end = si->max;
 
2317
2318	/* Count contiguous allocated slots above our target */
2319	for (toff = target; ++toff < end; nr_pages++) {
2320		/* Don't read in free or bad pages */
2321		if (!si->swap_map[toff])
2322			break;
2323		if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2324			break;
2325	}
2326	/* Count contiguous allocated slots below our target */
2327	for (toff = target; --toff >= base; nr_pages++) {
2328		/* Don't read in free or bad pages */
2329		if (!si->swap_map[toff])
2330			break;
2331		if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2332			break;
2333	}
2334	spin_unlock(&swap_lock);
2335
2336	/*
2337	 * Indicate starting offset, and return number of pages to get:
2338	 * if only 1, say 0, since there's then no readahead to be done.
2339	 */
2340	*offset = ++toff;
2341	return nr_pages? ++nr_pages: 0;
2342}
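/*
 * Worked example for the window above (numbers invented): with
 * page_cluster == 3 the window is 8 slots, so a target offset of 21 gives
 * base == 16 and end == 24.  If slots 19..23 are allocated and slot 18 is
 * free, the upward scan counts 22 and 23, the downward scan counts 20 and
 * 19 and stops at 18, so *offset becomes 19 and the function returns 5:
 * the target plus its four counted neighbours, read ahead as one run.
 */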
 
2343
2344/*
2345 * add_swap_count_continuation - called when a swap count is duplicated
2346 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2347 * page of the original vmalloc'ed swap_map, to hold the continuation count
2348 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2349 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2350 *
2351 * These continuation pages are seldom referenced: the common paths all work
2352 * on the original swap_map, only referring to a continuation page when the
2353 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2354 *
2355 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2356 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2357 * can be called after dropping locks.
2358 */
2359int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2360{
2361	struct swap_info_struct *si;
 
2362	struct page *head;
2363	struct page *page;
2364	struct page *list_page;
2365	pgoff_t offset;
2366	unsigned char count;
 
2367
2368	/*
2369	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2370	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2371	 */
2372	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2373
2374	si = swap_info_get(entry);
2375	if (!si) {
2376		/*
2377		 * An acceptable race has occurred since the failing
2378		 * __swap_duplicate(): the swap entry has been freed,
2379		 * perhaps even the whole swap_map cleared for swapoff.
2380		 */
2381		goto outer;
2382	}
 
2383
2384	offset = swp_offset(entry);
2385	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
 
 
 
2386
2387	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2388		/*
2389		 * The higher the swap count, the more likely it is that tasks
2390		 * will race to add swap count continuation: we need to avoid
2391		 * over-provisioning.
2392		 */
2393		goto out;
2394	}
2395
2396	if (!page) {
2397		spin_unlock(&swap_lock);
2398		return -ENOMEM;
2399	}
2400
2401	/*
2402	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2403	 * no architecture is using highmem pages for kernel pagetables: so it
2404	 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2405	 */
2406	head = vmalloc_to_page(si->swap_map + offset);
2407	offset &= ~PAGE_MASK;
2408
 
2409	/*
2410	 * Page allocation does not initialize the page's lru field,
2411	 * but it does always reset its private field.
2412	 */
2413	if (!page_private(head)) {
2414		BUG_ON(count & COUNT_CONTINUED);
2415		INIT_LIST_HEAD(&head->lru);
2416		set_page_private(head, SWP_CONTINUED);
2417		si->flags |= SWP_CONTINUED;
2418	}
2419
2420	list_for_each_entry(list_page, &head->lru, lru) {
2421		unsigned char *map;
2422
2423		/*
2424		 * If the previous map said no continuation, but we've found
2425		 * a continuation page, free our allocation and use this one.
2426		 */
2427		if (!(count & COUNT_CONTINUED))
2428			goto out;
2429
2430		map = kmap_atomic(list_page, KM_USER0) + offset;
2431		count = *map;
2432		kunmap_atomic(map, KM_USER0);
2433
2434		/*
2435		 * If this continuation count now has some space in it,
2436		 * free our allocation and use this one.
2437		 */
2438		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2439			goto out;
2440	}
2441
2442	list_add_tail(&page->lru, &head->lru);
2443	page = NULL;			/* now it's attached, don't free it */
 
 
2444out:
2445	spin_unlock(&swap_lock);
 
 
2446outer:
2447	if (page)
2448		__free_page(page);
2449	return 0;
2450}
2451
2452/*
2453 * swap_count_continued - when the original swap_map count is incremented
2454 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2455 * into, carry if so, or else fail until a new continuation page is allocated;
2456 * when the original swap_map count is decremented from 0 with continuation,
2457 * borrow from the continuation and report whether it still holds more.
2458 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
 
2459 */
2460static bool swap_count_continued(struct swap_info_struct *si,
2461				 pgoff_t offset, unsigned char count)
2462{
2463	struct page *head;
2464	struct page *page;
2465	unsigned char *map;
 
2466
2467	head = vmalloc_to_page(si->swap_map + offset);
2468	if (page_private(head) != SWP_CONTINUED) {
2469		BUG_ON(count & COUNT_CONTINUED);
2470		return false;		/* need to add count continuation */
2471	}
2472
 
2473	offset &= ~PAGE_MASK;
2474	page = list_entry(head->lru.next, struct page, lru);
2475	map = kmap_atomic(page, KM_USER0) + offset;
2476
2477	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
2478		goto init_map;		/* jump over SWAP_CONT_MAX checks */
2479
2480	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2481		/*
2482		 * Think of how you add 1 to 999
2483		 */
2484		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2485			kunmap_atomic(map, KM_USER0);
2486			page = list_entry(page->lru.next, struct page, lru);
2487			BUG_ON(page == head);
2488			map = kmap_atomic(page, KM_USER0) + offset;
2489		}
2490		if (*map == SWAP_CONT_MAX) {
2491			kunmap_atomic(map, KM_USER0);
2492			page = list_entry(page->lru.next, struct page, lru);
2493			if (page == head)
2494				return false;	/* add count continuation */
2495			map = kmap_atomic(page, KM_USER0) + offset;
 
 
2496init_map:		*map = 0;		/* we didn't zero the page */
2497		}
2498		*map += 1;
2499		kunmap_atomic(map, KM_USER0);
2500		page = list_entry(page->lru.prev, struct page, lru);
2501		while (page != head) {
2502			map = kmap_atomic(page, KM_USER0) + offset;
2503			*map = COUNT_CONTINUED;
2504			kunmap_atomic(map, KM_USER0);
2505			page = list_entry(page->lru.prev, struct page, lru);
2506		}
2507		return true;			/* incremented */
2508
2509	} else {				/* decrementing */
2510		/*
2511		 * Think of how you subtract 1 from 1000
2512		 */
2513		BUG_ON(count != COUNT_CONTINUED);
2514		while (*map == COUNT_CONTINUED) {
2515			kunmap_atomic(map, KM_USER0);
2516			page = list_entry(page->lru.next, struct page, lru);
2517			BUG_ON(page == head);
2518			map = kmap_atomic(page, KM_USER0) + offset;
2519		}
2520		BUG_ON(*map == 0);
2521		*map -= 1;
2522		if (*map == 0)
2523			count = 0;
2524		kunmap_atomic(map, KM_USER0);
2525		page = list_entry(page->lru.prev, struct page, lru);
2526		while (page != head) {
2527			map = kmap_atomic(page, KM_USER0) + offset;
2528			*map = SWAP_CONT_MAX | count;
2529			count = COUNT_CONTINUED;
2530			kunmap_atomic(map, KM_USER0);
2531			page = list_entry(page->lru.prev, struct page, lru);
2532		}
2533		return count == COUNT_CONTINUED;
2534	}
 
 
 
2535}
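/*
 * Illustrative sketch, not part of the original file: the carry logic above
 * in miniature.  The swap_map byte is the lowest "digit" (capped at
 * SWAP_MAP_MAX) and each continuation page supplies the next digit (capped
 * at SWAP_CONT_MAX); incrementing works exactly like adding 1 to 999.
 */
#if 0	/* standalone userspace demo */
#include <stdio.h>

#define DIGIT_MAX	9	/* stands in for SWAP_CONT_MAX */

/* digits[0] is the lowest digit; returns 0 when one more "page" is needed */
static int demo_carry_increment(unsigned char *digits, int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++) {
		if (digits[i] < DIGIT_MAX) {
			digits[i]++;	/* room in this digit: done */
			return 1;
		}
		digits[i] = 0;		/* 9 -> 0, carry into the next digit */
	}
	return 0;
}

int main(void)
{
	unsigned char digits[3] = { 9, 9, 0 };	/* the count "099" */

	demo_carry_increment(digits, 3);
	/* prints 100: the carry rippled through two full digits */
	printf("%u%u%u\n", digits[2], digits[1], digits[0]);
	return 0;
}
#endif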
2536
2537/*
2538 * free_swap_count_continuations - swapoff free all the continuation pages
2539 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2540 */
2541static void free_swap_count_continuations(struct swap_info_struct *si)
2542{
2543	pgoff_t offset;
2544
2545	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2546		struct page *head;
2547		head = vmalloc_to_page(si->swap_map + offset);
2548		if (page_private(head)) {
2549			struct list_head *this, *next;
2550			list_for_each_safe(this, next, &head->lru) {
2551				struct page *page;
2552				page = list_entry(this, struct page, lru);
2553				list_del(this);
2554				__free_page(page);
2555			}
2556		}
2557	}
2558}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/blkdev.h>
  10#include <linux/mm.h>
  11#include <linux/sched/mm.h>
  12#include <linux/sched/task.h>
  13#include <linux/hugetlb.h>
  14#include <linux/mman.h>
  15#include <linux/slab.h>
  16#include <linux/kernel_stat.h>
  17#include <linux/swap.h>
  18#include <linux/vmalloc.h>
  19#include <linux/pagemap.h>
  20#include <linux/namei.h>
  21#include <linux/shmem_fs.h>
  22#include <linux/blk-cgroup.h>
  23#include <linux/random.h>
  24#include <linux/writeback.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/init.h>
 
  28#include <linux/ksm.h>
  29#include <linux/rmap.h>
  30#include <linux/security.h>
  31#include <linux/backing-dev.h>
  32#include <linux/mutex.h>
  33#include <linux/capability.h>
  34#include <linux/syscalls.h>
  35#include <linux/memcontrol.h>
  36#include <linux/poll.h>
  37#include <linux/oom.h>
  38#include <linux/swapfile.h>
  39#include <linux/export.h>
  40#include <linux/swap_slots.h>
  41#include <linux/sort.h>
  42#include <linux/completion.h>
  43#include <linux/suspend.h>
  44#include <linux/zswap.h>
  45#include <linux/plist.h>
  46
 
  47#include <asm/tlbflush.h>
  48#include <linux/swapops.h>
  49#include <linux/swap_cgroup.h>
  50#include "internal.h"
  51#include "swap.h"
  52
  53static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  54				 unsigned char);
  55static void free_swap_count_continuations(struct swap_info_struct *);
 
  56
  57static DEFINE_SPINLOCK(swap_lock);
  58static unsigned int nr_swapfiles;
  59atomic_long_t nr_swap_pages;
  60/*
  61 * Some modules use swappable objects and may try to swap them out under
  62 * memory pressure (via the shrinker). Before doing so, they may wish to
  63 * check to see if any swap space is available.
  64 */
  65EXPORT_SYMBOL_GPL(nr_swap_pages);
  66/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  67long total_swap_pages;
  68static int least_priority = -1;
  69unsigned long swapfile_maximum_size;
  70#ifdef CONFIG_MIGRATION
  71bool swap_migration_ad_supported;
  72#endif	/* CONFIG_MIGRATION */
  73
  74static const char Bad_file[] = "Bad swap file entry ";
  75static const char Unused_file[] = "Unused swap file entry ";
  76static const char Bad_offset[] = "Bad swap offset entry ";
  77static const char Unused_offset[] = "Unused swap offset entry ";
  78
  79/*
  80 * all active swap_info_structs
  81 * protected with swap_lock, and ordered by priority.
  82 */
  83static PLIST_HEAD(swap_active_head);
  84
  85/*
  86 * all available (active, not full) swap_info_structs
  87 * protected with swap_avail_lock, ordered by priority.
  88 * This is used by folio_alloc_swap() instead of swap_active_head
  89 * because swap_active_head includes all swap_info_structs,
  90 * but folio_alloc_swap() doesn't need to look at full ones.
  91 * This uses its own lock instead of swap_lock because when a
  92 * swap_info_struct changes between not-full/full, it needs to
  93 * add/remove itself to/from this list, but the swap_info_struct->lock
  94 * is held and the locking order requires swap_lock to be taken
  95 * before any swap_info_struct->lock.
  96 */
  97static struct plist_head *swap_avail_heads;
  98static DEFINE_SPINLOCK(swap_avail_lock);
  99
 100static struct swap_info_struct *swap_info[MAX_SWAPFILES];
 101
 102static DEFINE_MUTEX(swapon_mutex);
 103
 104static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
 105/* Activity counter to indicate that a swapon or swapoff has occurred */
 106static atomic_t proc_poll_event = ATOMIC_INIT(0);
 107
 108atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 109
 110static struct swap_info_struct *swap_type_to_swap_info(int type)
 111{
 112	if (type >= MAX_SWAPFILES)
 113		return NULL;
 114
 115	return READ_ONCE(swap_info[type]); /* rcu_dereference() */
 116}
 117
 118static inline unsigned char swap_count(unsigned char ent)
 119{
 120	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
 121}
 122
 123/* Reclaim the swap entry anyway if possible */
 124#define TTRS_ANYWAY		0x1
 125/*
 126 * Reclaim the swap entry if there are no more mappings of the
 127 * corresponding page
 128 */
 129#define TTRS_UNMAPPED		0x2
 130/* Reclaim the swap entry if swap is getting full*/
 131#define TTRS_FULL		0x4
 132
 133/* returns 1 if swap entry is freed */
 134static int __try_to_reclaim_swap(struct swap_info_struct *si,
 135				 unsigned long offset, unsigned long flags)
 136{
 137	swp_entry_t entry = swp_entry(si->type, offset);
 138	struct folio *folio;
 139	int ret = 0;
 140
 141	folio = filemap_get_folio(swap_address_space(entry), offset);
 142	if (IS_ERR(folio))
 143		return 0;
 144	/*
 145	 * This function is called from scan_swap_map_slots() and it is also
 146	 * called by vmscan.c when reclaiming folios, so we hold a folio lock
 147	 * here. We have to use trylock to avoid deadlock. This is a special
 148	 * case and you should use folio_free_swap() with explicit folio_lock()
 149	 * in usual operations.
 150	 */
 151	if (folio_trylock(folio)) {
 152		if ((flags & TTRS_ANYWAY) ||
 153		    ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
 154		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
 155			ret = folio_free_swap(folio);
 156		folio_unlock(folio);
 157	}
 158	folio_put(folio);
 159	return ret;
 160}
 161
 162static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 163{
 164	struct rb_node *rb = rb_first(&sis->swap_extent_root);
 165	return rb_entry(rb, struct swap_extent, rb_node);
 166}
 167
 168static inline struct swap_extent *next_se(struct swap_extent *se)
 169{
 170	struct rb_node *rb = rb_next(&se->rb_node);
 171	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 172}
 173
 174/*
 175 * swapon tells the device that all the old swap contents can be discarded,
 176 * to allow the swap device to optimize its wear-levelling.
 177 */
 178static int discard_swap(struct swap_info_struct *si)
 179{
 180	struct swap_extent *se;
 181	sector_t start_block;
 182	sector_t nr_blocks;
 183	int err = 0;
 184
 185	/* Do not discard the swap header page! */
 186	se = first_se(si);
 187	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 188	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 189	if (nr_blocks) {
 190		err = blkdev_issue_discard(si->bdev, start_block,
 191				nr_blocks, GFP_KERNEL);
 192		if (err)
 193			return err;
 194		cond_resched();
 195	}
 196
 197	for (se = next_se(se); se; se = next_se(se)) {
 198		start_block = se->start_block << (PAGE_SHIFT - 9);
 199		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 200
 201		err = blkdev_issue_discard(si->bdev, start_block,
 202				nr_blocks, GFP_KERNEL);
 203		if (err)
 204			break;
 205
 206		cond_resched();
 207	}
 208	return err;		/* That will often be -EOPNOTSUPP */
 209}
 210
 211static struct swap_extent *
 212offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 213{
 214	struct swap_extent *se;
 215	struct rb_node *rb;
 216
 217	rb = sis->swap_extent_root.rb_node;
 218	while (rb) {
 219		se = rb_entry(rb, struct swap_extent, rb_node);
 220		if (offset < se->start_page)
 221			rb = rb->rb_left;
 222		else if (offset >= se->start_page + se->nr_pages)
 223			rb = rb->rb_right;
 224		else
 225			return se;
 226	}
 227	/* It *must* be present */
 228	BUG();
 229}
 230
 231sector_t swap_folio_sector(struct folio *folio)
 232{
 233	struct swap_info_struct *sis = swp_swap_info(folio->swap);
 234	struct swap_extent *se;
 235	sector_t sector;
 236	pgoff_t offset;
 237
 238	offset = swp_offset(folio->swap);
 239	se = offset_to_swap_extent(sis, offset);
 240	sector = se->start_block + (offset - se->start_page);
 241	return sector << (PAGE_SHIFT - 9);
 242}
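/*
 * Worked example for the mapping above (numbers invented): an extent with
 * start_page == 100 and start_block == 5000 maps swap offset 130 to block
 * 5000 + (130 - 100) = 5030; with 4KiB pages the returned sector is
 * 5030 << (PAGE_SHIFT - 9) == 5030 * 8 = 40240 512-byte sectors.
 */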
 243
 244/*
 245 * swap allocation tell device that a cluster of swap can now be discarded,
 246 * to allow the swap device to optimize its wear-levelling.
 247 */
 248static void discard_swap_cluster(struct swap_info_struct *si,
 249				 pgoff_t start_page, pgoff_t nr_pages)
 250{
 251	struct swap_extent *se = offset_to_swap_extent(si, start_page);
 
 252
 253	while (nr_pages) {
 254		pgoff_t offset = start_page - se->start_page;
 255		sector_t start_block = se->start_block + offset;
 256		sector_t nr_blocks = se->nr_pages - offset;
 257
 258		if (nr_blocks > nr_pages)
 259			nr_blocks = nr_pages;
 260		start_page += nr_blocks;
 261		nr_pages -= nr_blocks;
 262
 263		start_block <<= PAGE_SHIFT - 9;
 264		nr_blocks <<= PAGE_SHIFT - 9;
 265		if (blkdev_issue_discard(si->bdev, start_block,
 266					nr_blocks, GFP_NOIO))
 267			break;
 268
 269		se = next_se(se);
 270	}
 271}
 272
 273#ifdef CONFIG_THP_SWAP
 274#define SWAPFILE_CLUSTER	HPAGE_PMD_NR
 275
 276#define swap_entry_size(size)	(size)
 277#else
 278#define SWAPFILE_CLUSTER	256
 279
 280/*
 281 * Define swap_entry_size() as constant to let compiler to optimize
 282 * out some code if !CONFIG_THP_SWAP
 283 */
 284#define swap_entry_size(size)	1
 285#endif
 286#define LATENCY_LIMIT		256
 287
 288static inline void cluster_set_flag(struct swap_cluster_info *info,
 289	unsigned int flag)
 290{
 291	info->flags = flag;
 292}
 293
 294static inline unsigned int cluster_count(struct swap_cluster_info *info)
 295{
 296	return info->data;
 297}
 298
 299static inline void cluster_set_count(struct swap_cluster_info *info,
 300				     unsigned int c)
 301{
 302	info->data = c;
 303}
 304
 305static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 306					 unsigned int c, unsigned int f)
 307{
 308	info->flags = f;
 309	info->data = c;
 310}
 311
 312static inline unsigned int cluster_next(struct swap_cluster_info *info)
 313{
 314	return info->data;
 315}
 316
 317static inline void cluster_set_next(struct swap_cluster_info *info,
 318				    unsigned int n)
 319{
 320	info->data = n;
 321}
 322
 323static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 324					 unsigned int n, unsigned int f)
 325{
 326	info->flags = f;
 327	info->data = n;
 328}
 329
 330static inline bool cluster_is_free(struct swap_cluster_info *info)
 331{
 332	return info->flags & CLUSTER_FLAG_FREE;
 333}
 334
 335static inline bool cluster_is_null(struct swap_cluster_info *info)
 336{
 337	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 338}
 339
 340static inline void cluster_set_null(struct swap_cluster_info *info)
 341{
 342	info->flags = CLUSTER_FLAG_NEXT_NULL;
 343	info->data = 0;
 344}
 345
 346static inline bool cluster_is_huge(struct swap_cluster_info *info)
 347{
 348	if (IS_ENABLED(CONFIG_THP_SWAP))
 349		return info->flags & CLUSTER_FLAG_HUGE;
 350	return false;
 351}
 352
 353static inline void cluster_clear_huge(struct swap_cluster_info *info)
 354{
 355	info->flags &= ~CLUSTER_FLAG_HUGE;
 356}
 357
 358static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 359						     unsigned long offset)
 360{
 361	struct swap_cluster_info *ci;
 362
 363	ci = si->cluster_info;
 364	if (ci) {
 365		ci += offset / SWAPFILE_CLUSTER;
 366		spin_lock(&ci->lock);
 367	}
 368	return ci;
 369}
 370
 371static inline void unlock_cluster(struct swap_cluster_info *ci)
 372{
 373	if (ci)
 374		spin_unlock(&ci->lock);
 375}
 376
 377/*
 378 * Determine the locking method in use for this device.  Return
 379 * swap_cluster_info if SSD-style cluster-based locking is in place.
 380 */
 381static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 382		struct swap_info_struct *si, unsigned long offset)
 383{
 384	struct swap_cluster_info *ci;
 385
 386	/* Try to use fine-grained SSD-style locking if available: */
 387	ci = lock_cluster(si, offset);
 388	/* Otherwise, fall back to traditional, coarse locking: */
 389	if (!ci)
 390		spin_lock(&si->lock);
 391
 392	return ci;
 393}
 394
 395static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 396					       struct swap_cluster_info *ci)
 397{
 398	if (ci)
 399		unlock_cluster(ci);
 400	else
 401		spin_unlock(&si->lock);
 402}
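/*
 * Illustrative sketch, not part of the original file: the pairing
 * convention for the two helpers above.  Whatever lock the first call
 * chose (per-cluster lock on SSD layouts, si->lock otherwise) is released
 * by handing the same ci back to the second call.
 */
#if 0	/* demo only */
	struct swap_cluster_info *ci;
	unsigned char count;

	ci = lock_cluster_or_swap_info(si, offset);
	count = swap_count(si->swap_map[offset]);
	unlock_cluster_or_swap_info(si, ci);
#endif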
 403
 404static inline bool cluster_list_empty(struct swap_cluster_list *list)
 405{
 406	return cluster_is_null(&list->head);
 407}
 408
 409static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 410{
 411	return cluster_next(&list->head);
 412}
 413
 414static void cluster_list_init(struct swap_cluster_list *list)
 415{
 416	cluster_set_null(&list->head);
 417	cluster_set_null(&list->tail);
 418}
 419
 420static void cluster_list_add_tail(struct swap_cluster_list *list,
 421				  struct swap_cluster_info *ci,
 422				  unsigned int idx)
 423{
 424	if (cluster_list_empty(list)) {
 425		cluster_set_next_flag(&list->head, idx, 0);
 426		cluster_set_next_flag(&list->tail, idx, 0);
 427	} else {
 428		struct swap_cluster_info *ci_tail;
 429		unsigned int tail = cluster_next(&list->tail);
 430
 431		/*
 432		 * Nested cluster lock, but both cluster locks are
 433		 * only acquired when we held swap_info_struct->lock
 434		 */
 435		ci_tail = ci + tail;
 436		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 437		cluster_set_next(ci_tail, idx);
 438		spin_unlock(&ci_tail->lock);
 439		cluster_set_next_flag(&list->tail, idx, 0);
 440	}
 441}
 442
 443static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 444					   struct swap_cluster_info *ci)
 445{
 446	unsigned int idx;
 447
 448	idx = cluster_next(&list->head);
 449	if (cluster_next(&list->tail) == idx) {
 450		cluster_set_null(&list->head);
 451		cluster_set_null(&list->tail);
 452	} else
 453		cluster_set_next_flag(&list->head,
 454				      cluster_next(&ci[idx]), 0);
 455
 456	return idx;
 457}
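/*
 * Illustrative sketch, not part of the original file: these cluster lists
 * are linked by cluster index rather than by pointer -- each element's
 * "next" is stored in the cluster_info array itself.  Simplified demo:
 */
#if 0	/* standalone userspace demo */
#include <stdio.h>

#define NIL	(-1)

struct demo_cluster {
	int next;	/* stands in for cluster_next(&ci[idx]) */
};

int main(void)
{
	struct demo_cluster ci[4] = { {1}, {3}, {NIL}, {NIL} };
	int head = 0, tail = 3;		/* list currently holds 0 -> 1 -> 3 */
	int idx = head;

	/* del_first: the new head index is read from the old head's entry */
	head = (idx == tail) ? NIL : ci[idx].next;
	printf("popped cluster %d, new head %d\n", idx, head);
	return 0;
}
#endif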
 458
 459/* Add a cluster to discard list and schedule it to do discard */
 460static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 461		unsigned int idx)
 462{
 463	/*
 464	 * If scan_swap_map_slots() can't find a free cluster, it will check
 465	 * si->swap_map directly. To make sure the discarding cluster isn't
 466	 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
 467	 * It will be cleared after discard
 468	 */
 469	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 470			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 471
 472	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 473
 474	schedule_work(&si->discard_work);
 475}
 476
 477static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 478{
 479	struct swap_cluster_info *ci = si->cluster_info;
 480
 481	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 482	cluster_list_add_tail(&si->free_clusters, ci, idx);
 483}
 484
 485/*
 486 * Actually do the discard. After a cluster discard is finished, the cluster
 487 * will be added to the free cluster list. The caller should hold si->lock.
 488 */
 489static void swap_do_scheduled_discard(struct swap_info_struct *si)
 490{
 491	struct swap_cluster_info *info, *ci;
 492	unsigned int idx;
 493
 494	info = si->cluster_info;
 495
 496	while (!cluster_list_empty(&si->discard_clusters)) {
 497		idx = cluster_list_del_first(&si->discard_clusters, info);
 498		spin_unlock(&si->lock);
 499
 500		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 501				SWAPFILE_CLUSTER);
 502
 503		spin_lock(&si->lock);
 504		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 505		__free_cluster(si, idx);
 506		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 507				0, SWAPFILE_CLUSTER);
 508		unlock_cluster(ci);
 509	}
 510}
 511
 512static void swap_discard_work(struct work_struct *work)
 513{
 514	struct swap_info_struct *si;
 515
 516	si = container_of(work, struct swap_info_struct, discard_work);
 517
 518	spin_lock(&si->lock);
 519	swap_do_scheduled_discard(si);
 520	spin_unlock(&si->lock);
 521}
 522
 523static void swap_users_ref_free(struct percpu_ref *ref)
 524{
 525	struct swap_info_struct *si;
 526
 527	si = container_of(ref, struct swap_info_struct, users);
 528	complete(&si->comp);
 529}
 530
 531static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 532{
 533	struct swap_cluster_info *ci = si->cluster_info;
 534
 535	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 536	cluster_list_del_first(&si->free_clusters, ci);
 537	cluster_set_count_flag(ci + idx, 0, 0);
 538}
 539
 540static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 541{
 542	struct swap_cluster_info *ci = si->cluster_info + idx;
 543
 544	VM_BUG_ON(cluster_count(ci) != 0);
 545	/*
 546	 * If the swap is discardable, schedule a discard of the cluster
 547	 * instead of freeing it immediately. The cluster will be freed
 548	 * after discard.
 549	 */
 550	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 551	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 552		swap_cluster_schedule_discard(si, idx);
 553		return;
 554	}
 555
 556	__free_cluster(si, idx);
 557}
 558
 559/*
 560 * The cluster corresponding to page_nr will be used. The cluster will be
 561 * removed from free cluster list and its usage counter will be increased.
 562 */
 563static void inc_cluster_info_page(struct swap_info_struct *p,
 564	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 565{
 566	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 567
 568	if (!cluster_info)
 569		return;
 570	if (cluster_is_free(&cluster_info[idx]))
 571		alloc_cluster(p, idx);
 572
 573	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 574	cluster_set_count(&cluster_info[idx],
 575		cluster_count(&cluster_info[idx]) + 1);
 576}
 577
 578/*
 579 * The cluster corresponding to page_nr decreases its usage count by one. If the
 580 * counter becomes 0, which means no page in the cluster is in use, we can
 581 * optionally discard the cluster and add it to the free cluster list.
 582 */
 583static void dec_cluster_info_page(struct swap_info_struct *p,
 584	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 585{
 586	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 587
 588	if (!cluster_info)
 589		return;
 590
 591	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 592	cluster_set_count(&cluster_info[idx],
 593		cluster_count(&cluster_info[idx]) - 1);
 594
 595	if (cluster_count(&cluster_info[idx]) == 0)
 596		free_cluster(p, idx);
 597}
 598
 599/*
 600 * It's possible scan_swap_map_slots() uses a free cluster in the middle of the
 601 * free cluster list. Avoid such abuse to prevent list corruption.
 602 */
 603static bool
 604scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 605	unsigned long offset)
 606{
 607	struct percpu_cluster *percpu_cluster;
 608	bool conflict;
 609
 610	offset /= SWAPFILE_CLUSTER;
 611	conflict = !cluster_list_empty(&si->free_clusters) &&
 612		offset != cluster_list_first(&si->free_clusters) &&
 613		cluster_is_free(&si->cluster_info[offset]);
 614
 615	if (!conflict)
 616		return false;
 617
 618	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 619	cluster_set_null(&percpu_cluster->index);
 620	return true;
 621}
 622
 623/*
 624 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 625 * might involve allocating a new cluster for current CPU too.
 626 */
 627static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 628	unsigned long *offset, unsigned long *scan_base)
 629{
 630	struct percpu_cluster *cluster;
 631	struct swap_cluster_info *ci;
 632	unsigned long tmp, max;
 633
 634new_cluster:
 635	cluster = this_cpu_ptr(si->percpu_cluster);
 636	if (cluster_is_null(&cluster->index)) {
 637		if (!cluster_list_empty(&si->free_clusters)) {
 638			cluster->index = si->free_clusters.head;
 639			cluster->next = cluster_next(&cluster->index) *
 640					SWAPFILE_CLUSTER;
 641		} else if (!cluster_list_empty(&si->discard_clusters)) {
 642			/*
 643			 * we don't have free cluster but have some clusters in
 644			 * discarding, do discard now and reclaim them, then
 645			 * reread cluster_next_cpu since we dropped si->lock
 646			 */
 647			swap_do_scheduled_discard(si);
 648			*scan_base = this_cpu_read(*si->cluster_next_cpu);
 649			*offset = *scan_base;
 650			goto new_cluster;
 651		} else
 652			return false;
 653	}
 654
 655	/*
 656	 * Other CPUs can use our cluster if they can't find a free cluster,
 657	 * so check whether there is still a free entry in the cluster.
 658	 */
 659	tmp = cluster->next;
 660	max = min_t(unsigned long, si->max,
 661		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 662	if (tmp < max) {
 663		ci = lock_cluster(si, tmp);
 664		while (tmp < max) {
 665			if (!si->swap_map[tmp])
 666				break;
 667			tmp++;
 668		}
 669		unlock_cluster(ci);
 670	}
 671	if (tmp >= max) {
 672		cluster_set_null(&cluster->index);
 673		goto new_cluster;
 674	}
 675	cluster->next = tmp + 1;
 676	*offset = tmp;
 677	*scan_base = tmp;
 678	return true;
 679}
 680
 681static void __del_from_avail_list(struct swap_info_struct *p)
 682{
 683	int nid;
 684
 685	assert_spin_locked(&p->lock);
 686	for_each_node(nid)
 687		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 688}
 689
 690static void del_from_avail_list(struct swap_info_struct *p)
 691{
 692	spin_lock(&swap_avail_lock);
 693	__del_from_avail_list(p);
 694	spin_unlock(&swap_avail_lock);
 695}
 696
 697static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 698			     unsigned int nr_entries)
 699{
 700	unsigned int end = offset + nr_entries - 1;
 701
 702	if (offset == si->lowest_bit)
 703		si->lowest_bit += nr_entries;
 704	if (end == si->highest_bit)
 705		WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
 706	WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
 707	if (si->inuse_pages == si->pages) {
 708		si->lowest_bit = si->max;
 709		si->highest_bit = 0;
 710		del_from_avail_list(si);
 711	}
 712}
 713
 714static void add_to_avail_list(struct swap_info_struct *p)
 715{
 716	int nid;
 717
 718	spin_lock(&swap_avail_lock);
 719	for_each_node(nid)
 720		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 721	spin_unlock(&swap_avail_lock);
 722}
 723
 724static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 725			    unsigned int nr_entries)
 726{
 727	unsigned long begin = offset;
 728	unsigned long end = offset + nr_entries - 1;
 729	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 730
 731	if (offset < si->lowest_bit)
 732		si->lowest_bit = offset;
 733	if (end > si->highest_bit) {
 734		bool was_full = !si->highest_bit;
 735
 736		WRITE_ONCE(si->highest_bit, end);
 737		if (was_full && (si->flags & SWP_WRITEOK))
 738			add_to_avail_list(si);
 739	}
 740	atomic_long_add(nr_entries, &nr_swap_pages);
 741	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
 742	if (si->flags & SWP_BLKDEV)
 743		swap_slot_free_notify =
 744			si->bdev->bd_disk->fops->swap_slot_free_notify;
 745	else
 746		swap_slot_free_notify = NULL;
 747	while (offset <= end) {
 748		arch_swap_invalidate_page(si->type, offset);
 749		zswap_invalidate(si->type, offset);
 750		if (swap_slot_free_notify)
 751			swap_slot_free_notify(si->bdev, offset);
 752		offset++;
 753	}
 754	clear_shadow_from_swap_cache(si->type, begin, end);
 755}
 756
 757static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
 758{
 759	unsigned long prev;
 760
 761	if (!(si->flags & SWP_SOLIDSTATE)) {
 762		si->cluster_next = next;
 763		return;
 764	}
 765
 766	prev = this_cpu_read(*si->cluster_next_cpu);
 767	/*
 768	 * Cross the swap address space size aligned trunk, choose
 769	 * another trunk randomly to avoid lock contention on swap
 770	 * address space if possible.
 771	 */
 772	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
 773	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
 774		/* No free swap slots available */
 775		if (si->highest_bit <= si->lowest_bit)
 776			return;
 777		next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
 778		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
 779		next = max_t(unsigned int, next, si->lowest_bit);
 780	}
 781	this_cpu_write(*si->cluster_next_cpu, next);
 782}
 783
 784static bool swap_offset_available_and_locked(struct swap_info_struct *si,
 785					     unsigned long offset)
 786{
 787	if (data_race(!si->swap_map[offset])) {
 788		spin_lock(&si->lock);
 789		return true;
 790	}
 791
 792	if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
 793		spin_lock(&si->lock);
 794		return true;
 795	}
 796
 797	return false;
 798}
 799
 800static int scan_swap_map_slots(struct swap_info_struct *si,
 801			       unsigned char usage, int nr,
 802			       swp_entry_t slots[])
 803{
 804	struct swap_cluster_info *ci;
 805	unsigned long offset;
 806	unsigned long scan_base;
 807	unsigned long last_in_cluster = 0;
 808	int latency_ration = LATENCY_LIMIT;
 809	int n_ret = 0;
 810	bool scanned_many = false;
 811
 812	/*
 813	 * We try to cluster swap pages by allocating them sequentially
 814	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 815	 * way, however, we resort to first-free allocation, starting
 816	 * a new cluster.  This prevents us from scattering swap pages
 817	 * all over the entire swap partition, so that we reduce
 818	 * overall disk seek times between swap pages.  -- sct
 819	 * But we do now try to find an empty cluster.  -Andrea
 820	 * And we let swap pages go all over an SSD partition.  Hugh
 821	 */
 822
 823	si->flags += SWP_SCANNING;
 824	/*
 825	 * Use percpu scan base for SSD to reduce lock contention on
 826	 * cluster and swap cache.  For HDD, sequential access is more
 827	 * important.
 828	 */
 829	if (si->flags & SWP_SOLIDSTATE)
 830		scan_base = this_cpu_read(*si->cluster_next_cpu);
 831	else
 832		scan_base = si->cluster_next;
 833	offset = scan_base;
 834
 835	/* SSD algorithm */
 836	if (si->cluster_info) {
 837		if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 838			goto scan;
 839	} else if (unlikely(!si->cluster_nr--)) {
 840		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 841			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 842			goto checks;
 843		}
 844
 845		spin_unlock(&si->lock);
 846
 847		/*
 848		 * If seek is expensive, start searching for new cluster from
 849		 * start of partition, to minimize the span of allocated swap.
 850		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 851		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 
 
 852		 */
 853		scan_base = offset = si->lowest_bit;
 
 854		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 855
 856		/* Locate the first empty (unaligned) cluster */
 857		for (; last_in_cluster <= si->highest_bit; offset++) {
 858			if (si->swap_map[offset])
 859				last_in_cluster = offset + SWAPFILE_CLUSTER;
 860			else if (offset == last_in_cluster) {
 861				spin_lock(&si->lock);
 862				offset -= SWAPFILE_CLUSTER - 1;
 863				si->cluster_next = offset;
 864				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 
 865				goto checks;
 866			}
 867			if (unlikely(--latency_ration < 0)) {
 868				cond_resched();
 869				latency_ration = LATENCY_LIMIT;
 870			}
 871		}
 872
 873		offset = scan_base;
 874		spin_lock(&si->lock);
 875		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 
 876	}
 877
 878checks:
 879	if (si->cluster_info) {
 880		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
 881		/* take a break if we already got some slots */
 882			if (n_ret)
 883				goto done;
 884			if (!scan_swap_map_try_ssd_cluster(si, &offset,
 885							&scan_base))
 886				goto scan;
 887		}
 888	}
 889	if (!(si->flags & SWP_WRITEOK))
 890		goto no_page;
 891	if (!si->highest_bit)
 892		goto no_page;
 893	if (offset > si->highest_bit)
 894		scan_base = offset = si->lowest_bit;
 895
 896	ci = lock_cluster(si, offset);
 897	/* reuse swap entry of cache-only swap if not busy. */
 898	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 899		int swap_was_freed;
 900		unlock_cluster(ci);
 901		spin_unlock(&si->lock);
 902		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 903		spin_lock(&si->lock);
 904		/* entry was freed successfully, try to use this again */
 905		if (swap_was_freed)
 906			goto checks;
 907		goto scan; /* check next one */
 908	}
 909
 910	if (si->swap_map[offset]) {
 911		unlock_cluster(ci);
 912		if (!n_ret)
 913			goto scan;
 914		else
 915			goto done;
 916	}
 917	WRITE_ONCE(si->swap_map[offset], usage);
 918	inc_cluster_info_page(si, si->cluster_info, offset);
 919	unlock_cluster(ci);
 920
 921	swap_range_alloc(si, offset, 1);
 922	slots[n_ret++] = swp_entry(si->type, offset);
 923
 924	/* got enough slots or reach max slots? */
 925	if ((n_ret == nr) || (offset >= si->highest_bit))
 926		goto done;
 927
 928	/* search for next available slot */
 929
 930	/* time to take a break? */
 931	if (unlikely(--latency_ration < 0)) {
 932		if (n_ret)
 933			goto done;
 934		spin_unlock(&si->lock);
 935		cond_resched();
 936		spin_lock(&si->lock);
 937		latency_ration = LATENCY_LIMIT;
 938	}
 
 
 
 939
 940	/* try to get more slots in cluster */
 941	if (si->cluster_info) {
 942		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 943			goto checks;
 944	} else if (si->cluster_nr && !si->swap_map[++offset]) {
 945		/* non-ssd case, still more slots in cluster? */
 946		--si->cluster_nr;
 947		goto checks;
 948	}
 949
 950	/*
 951	 * Even if there's no free clusters available (fragmented),
 952	 * try to scan a little more quickly with lock held unless we
 953	 * have scanned too many slots already.
 954	 */
 955	if (!scanned_many) {
 956		unsigned long scan_limit;
 957
 958		if (offset < scan_base)
 959			scan_limit = scan_base;
 960		else
 961			scan_limit = si->highest_bit;
 962		for (; offset <= scan_limit && --latency_ration > 0;
 963		     offset++) {
 964			if (!si->swap_map[offset])
 965				goto checks;
 966		}
 967	}
 968
 969done:
 970	set_cluster_next(si, offset + 1);
 971	si->flags -= SWP_SCANNING;
 972	return n_ret;
 973
 974scan:
 975	spin_unlock(&si->lock);
 976	while (++offset <= READ_ONCE(si->highest_bit)) {
 977		if (unlikely(--latency_ration < 0)) {
 978			cond_resched();
 979			latency_ration = LATENCY_LIMIT;
 980			scanned_many = true;
 981		}
 982		if (swap_offset_available_and_locked(si, offset))
 983			goto checks;
 984	}
 985	offset = si->lowest_bit;
 986	while (offset < scan_base) {
 987		if (unlikely(--latency_ration < 0)) {
 988			cond_resched();
 989			latency_ration = LATENCY_LIMIT;
 990			scanned_many = true;
 991		}
 992		if (swap_offset_available_and_locked(si, offset))
 993			goto checks;
 994		offset++;
 995	}
 996	spin_lock(&si->lock);
 997
 998no_page:
 999	si->flags -= SWP_SCANNING;
1000	return n_ret;
1001}
1002
1003static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
1004{
1005	unsigned long idx;
1006	struct swap_cluster_info *ci;
1007	unsigned long offset;
 
1008
1009	/*
1010	 * Should not even be attempting cluster allocations when huge
1011	 * page swap is disabled.  Warn and fail the allocation.
1012	 */
1013	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1014		VM_WARN_ON_ONCE(1);
1015		return 0;
1016	}
1017
1018	if (cluster_list_empty(&si->free_clusters))
1019		return 0;
1020
1021	idx = cluster_list_first(&si->free_clusters);
1022	offset = idx * SWAPFILE_CLUSTER;
1023	ci = lock_cluster(si, offset);
1024	alloc_cluster(si, idx);
1025	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1026
1027	memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1028	unlock_cluster(ci);
1029	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1030	*slot = swp_entry(si->type, offset);
1031
1032	return 1;
1033}
1034
1035static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1036{
1037	unsigned long offset = idx * SWAPFILE_CLUSTER;
1038	struct swap_cluster_info *ci;
1039
1040	ci = lock_cluster(si, offset);
1041	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1042	cluster_set_count_flag(ci, 0, 0);
1043	free_cluster(si, idx);
1044	unlock_cluster(ci);
1045	swap_range_free(si, offset, SWAPFILE_CLUSTER);
1046}
1047
1048int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 
1049{
1050	unsigned long size = swap_entry_size(entry_size);
1051	struct swap_info_struct *si, *next;
1052	long avail_pgs;
1053	int n_ret = 0;
1054	int node;
1055
1056	/* Only single cluster request supported */
1057	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1058
1059	spin_lock(&swap_avail_lock);
1060
1061	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1062	if (avail_pgs <= 0) {
1063		spin_unlock(&swap_avail_lock);
1064		goto noswap;
 
 
1065	}
1066
1067	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1068
1069	atomic_long_sub(n_goal * size, &nr_swap_pages);
1070
1071start_over:
1072	node = numa_node_id();
1073	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1074		/* requeue si to after same-priority siblings */
1075		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1076		spin_unlock(&swap_avail_lock);
1077		spin_lock(&si->lock);
1078		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1079			spin_lock(&swap_avail_lock);
1080			if (plist_node_empty(&si->avail_lists[node])) {
1081				spin_unlock(&si->lock);
1082				goto nextsi;
1083			}
1084			WARN(!si->highest_bit,
1085			     "swap_info %d in list but !highest_bit\n",
1086			     si->type);
1087			WARN(!(si->flags & SWP_WRITEOK),
1088			     "swap_info %d in list but !SWP_WRITEOK\n",
1089			     si->type);
1090			__del_from_avail_list(si);
1091			spin_unlock(&si->lock);
1092			goto nextsi;
1093		}
1094		if (size == SWAPFILE_CLUSTER) {
1095			if (si->flags & SWP_BLKDEV)
1096				n_ret = swap_alloc_cluster(si, swp_entries);
1097		} else
1098			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1099						    n_goal, swp_entries);
1100		spin_unlock(&si->lock);
1101		if (n_ret || size == SWAPFILE_CLUSTER)
1102			goto check_out;
1103		cond_resched();
1104
1105		spin_lock(&swap_avail_lock);
1106nextsi:
1107		/*
1108		 * if we got here, it's likely that si was almost full before,
1109		 * and since scan_swap_map_slots() can drop the si->lock,
1110		 * multiple callers probably all tried to get a page from the
1111		 * same si and it filled up before we could get one; or, the si
1112		 * filled up between us dropping swap_avail_lock and taking
1113		 * si->lock. Since we dropped the swap_avail_lock, the
1114		 * swap_avail_head list may have been modified; so if next is
1115		 * still in the swap_avail_head list then try it, otherwise
1116		 * start over if we have not gotten any slots.
1117		 */
1118		if (plist_node_empty(&next->avail_lists[node]))
1119			goto start_over;
1120	}
1121
1122	spin_unlock(&swap_avail_lock);
1123
1124check_out:
1125	if (n_ret < n_goal)
1126		atomic_long_add((long)(n_goal - n_ret) * size,
1127				&nr_swap_pages);
1128noswap:
1129	return n_ret;
1130}
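/*
 * Illustrative aside, not part of this file: a minimal sketch of how a
 * caller (for example the swap slot cache) might batch-allocate order-0
 * entries with get_swap_pages().  refill_slots_sketch() is a hypothetical
 * name; only the get_swap_pages() signature above is taken from this file.
 */
static int refill_slots_sketch(swp_entry_t *slots, int nr)
{
	/* entry_size == 1 requests order-0 slots; fewer may be returned */
	int got = get_swap_pages(min(nr, SWAP_BATCH), slots, 1);

	return got;	/* 0 means no swap space was available */
}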
1131
1132static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1133{
1134	struct swap_info_struct *p;
1135	unsigned long offset;
1136
1137	if (!entry.val)
1138		goto out;
1139	p = swp_swap_info(entry);
1140	if (!p)
1141		goto bad_nofile;
1142	if (data_race(!(p->flags & SWP_USED)))
1143		goto bad_device;
1144	offset = swp_offset(entry);
1145	if (offset >= p->max)
1146		goto bad_offset;
1147	if (data_race(!p->swap_map[swp_offset(entry)]))
1148		goto bad_free;
1149	return p;
1150
1151bad_free:
1152	pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1153	goto out;
1154bad_offset:
1155	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1156	goto out;
1157bad_device:
1158	pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1159	goto out;
1160bad_nofile:
1161	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1162out:
1163	return NULL;
1164}
1165
1166static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1167					struct swap_info_struct *q)
1168{
1169	struct swap_info_struct *p;
1170
1171	p = _swap_info_get(entry);
1172
1173	if (p != q) {
1174		if (q != NULL)
1175			spin_unlock(&q->lock);
1176		if (p != NULL)
1177			spin_lock(&p->lock);
1178	}
1179	return p;
1180}
1181
1182static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1183					      unsigned long offset,
1184					      unsigned char usage)
1185{
1186	unsigned char count;
1187	unsigned char has_cache;
1188
1189	count = p->swap_map[offset];
1190
1191	has_cache = count & SWAP_HAS_CACHE;
1192	count &= ~SWAP_HAS_CACHE;
1193
1194	if (usage == SWAP_HAS_CACHE) {
1195		VM_BUG_ON(!has_cache);
1196		has_cache = 0;
1197	} else if (count == SWAP_MAP_SHMEM) {
1198		/*
1199		 * Or we could insist on shmem.c using a special
1200		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1201		 */
1202		count = 0;
1203	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1204		if (count == COUNT_CONTINUED) {
1205			if (swap_count_continued(p, offset, count))
1206				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1207			else
1208				count = SWAP_MAP_MAX;
1209		} else
1210			count--;
1211	}
1212
1213	usage = count | has_cache;
1214	if (usage)
1215		WRITE_ONCE(p->swap_map[offset], usage);
1216	else
1217		WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1218
1219	return usage;
1220}
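/*
 * Illustrative worked example, not part of this file (it assumes the usual
 * <linux/swap.h> encoding, where SWAP_HAS_CACHE is 0x40 and the low bits
 * hold the map count): an entry mapped by two processes and present in the
 * swap cache has swap_map[offset] == 0x42.  __swap_entry_free_locked(p,
 * offset, 1) drops one map reference and writes back 0x41; a second call
 * leaves only the cache bit, 0x40; a final call with usage == SWAP_HAS_CACHE
 * returns 0, which is the callers' cue (see __swap_entry_free() and
 * put_swap_folio() below) to hand the now-unused slot to free_swap_slot().
 */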
1221
1222/*
 1223	 * When we get a swap entry, unless something else already prevents
 1224	 * swapoff (for example, the folio in the swap cache is locked, or the
 1225	 * page table lock is held), the swap entry may become invalid because
 1226	 * of swapoff.  Then we need to enclose all swap related functions
 1227	 * with get_swap_device() and put_swap_device(), unless the swap
 1228	 * functions call get/put_swap_device() by themselves.
1229 *
1230 * Check whether swap entry is valid in the swap device.  If so,
1231 * return pointer to swap_info_struct, and keep the swap entry valid
1232 * via preventing the swap device from being swapoff, until
1233 * put_swap_device() is called.  Otherwise return NULL.
1234 *
1235 * Notice that swapoff or swapoff+swapon can still happen before the
1236 * percpu_ref_tryget_live() in get_swap_device() or after the
1237 * percpu_ref_put() in put_swap_device() if there isn't any other way
1238 * to prevent swapoff.  The caller must be prepared for that.  For
1239 * example, the following situation is possible.
1240 *
1241 *   CPU1				CPU2
1242 *   do_swap_page()
1243 *     ...				swapoff+swapon
1244 *     __read_swap_cache_async()
1245 *       swapcache_prepare()
1246 *         __swap_duplicate()
1247 *           // check swap_map
1248 *     // verify PTE not changed
1249 *
 1250	 * In __swap_duplicate(), the swap_map needs to be checked before it is
 1251	 * changed, partly because the specified swap entry may belong to another
 1252	 * swap device which has been swapped off.  And in do_swap_page(), after
 1253	 * the page is read from the swap device, the PTE is verified (with the
 1254	 * page table locked) not to have changed, to check whether the swap
 1255	 * device has been swapped off, or swapped off and on again.
1256 */
1257struct swap_info_struct *get_swap_device(swp_entry_t entry)
1258{
1259	struct swap_info_struct *si;
1260	unsigned long offset;
1261
1262	if (!entry.val)
1263		goto out;
1264	si = swp_swap_info(entry);
1265	if (!si)
1266		goto bad_nofile;
1267	if (!percpu_ref_tryget_live(&si->users))
1268		goto out;
1269	/*
1270	 * Guarantee the si->users are checked before accessing other
1271	 * fields of swap_info_struct.
1272	 *
1273	 * Paired with the spin_unlock() after setup_swap_info() in
1274	 * enable_swap_info().
1275	 */
1276	smp_rmb();
1277	offset = swp_offset(entry);
1278	if (offset >= si->max)
1279		goto put_out;
1280
1281	return si;
1282bad_nofile:
1283	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1284out:
1285	return NULL;
1286put_out:
1287	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1288	percpu_ref_put(&si->users);
1289	return NULL;
1290}
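/*
 * Illustrative aside, not part of this file: the usage pattern described by
 * the comment above, as a minimal sketch.  swap_entry_swapped_sketch() is a
 * hypothetical helper; get_swap_device()/put_swap_device() are the real
 * interfaces.
 */
static bool swap_entry_swapped_sketch(swp_entry_t entry)
{
	struct swap_info_struct *si;
	bool swapped;

	si = get_swap_device(entry);
	if (!si)
		return false;		/* device gone, or offset out of range */
	/* si and si->swap_map stay valid until put_swap_device() */
	swapped = swap_count(READ_ONCE(si->swap_map[swp_offset(entry)])) != 0;
	put_swap_device(si);
	return swapped;
}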
1291
1292static unsigned char __swap_entry_free(struct swap_info_struct *p,
1293				       swp_entry_t entry)
1294{
1295	struct swap_cluster_info *ci;
1296	unsigned long offset = swp_offset(entry);
1297	unsigned char usage;
1298
1299	ci = lock_cluster_or_swap_info(p, offset);
1300	usage = __swap_entry_free_locked(p, offset, 1);
1301	unlock_cluster_or_swap_info(p, ci);
1302	if (!usage)
1303		free_swap_slot(entry);
1304
1305	return usage;
1306}
1307
1308static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1309{
1310	struct swap_cluster_info *ci;
1311	unsigned long offset = swp_offset(entry);
1312	unsigned char count;
1313
1314	ci = lock_cluster(p, offset);
1315	count = p->swap_map[offset];
1316	VM_BUG_ON(count != SWAP_HAS_CACHE);
1317	p->swap_map[offset] = 0;
1318	dec_cluster_info_page(p, p->cluster_info, offset);
1319	unlock_cluster(ci);
1320
1321	mem_cgroup_uncharge_swap(entry, 1);
1322	swap_range_free(p, offset, 1);
1323}
1324
1325/*
1326 * Caller has made sure that the swap device corresponding to entry
1327 * is still around or has not been recycled.
1328 */
1329void swap_free(swp_entry_t entry)
1330{
1331	struct swap_info_struct *p;
1332
1333	p = _swap_info_get(entry);
1334	if (p)
1335		__swap_entry_free(p, entry);
1336}
1337
1338/*
1339 * Called after dropping swapcache to decrease refcnt to swap entries.
1340 */
1341void put_swap_folio(struct folio *folio, swp_entry_t entry)
1342{
1343	unsigned long offset = swp_offset(entry);
1344	unsigned long idx = offset / SWAPFILE_CLUSTER;
1345	struct swap_cluster_info *ci;
1346	struct swap_info_struct *si;
1347	unsigned char *map;
1348	unsigned int i, free_entries = 0;
1349	unsigned char val;
1350	int size = swap_entry_size(folio_nr_pages(folio));
1351
1352	si = _swap_info_get(entry);
1353	if (!si)
1354		return;
1355
1356	ci = lock_cluster_or_swap_info(si, offset);
1357	if (size == SWAPFILE_CLUSTER) {
1358		VM_BUG_ON(!cluster_is_huge(ci));
1359		map = si->swap_map + offset;
1360		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1361			val = map[i];
1362			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1363			if (val == SWAP_HAS_CACHE)
1364				free_entries++;
1365		}
1366		cluster_clear_huge(ci);
1367		if (free_entries == SWAPFILE_CLUSTER) {
1368			unlock_cluster_or_swap_info(si, ci);
1369			spin_lock(&si->lock);
1370			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1371			swap_free_cluster(si, idx);
1372			spin_unlock(&si->lock);
1373			return;
1374		}
1375	}
1376	for (i = 0; i < size; i++, entry.val++) {
1377		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1378			unlock_cluster_or_swap_info(si, ci);
1379			free_swap_slot(entry);
1380			if (i == size - 1)
1381				return;
1382			lock_cluster_or_swap_info(si, offset);
1383		}
1384	}
1385	unlock_cluster_or_swap_info(si, ci);
1386}
1387
1388#ifdef CONFIG_THP_SWAP
1389int split_swap_cluster(swp_entry_t entry)
1390{
1391	struct swap_info_struct *si;
1392	struct swap_cluster_info *ci;
1393	unsigned long offset = swp_offset(entry);
1394
1395	si = _swap_info_get(entry);
1396	if (!si)
1397		return -EBUSY;
1398	ci = lock_cluster(si, offset);
1399	cluster_clear_huge(ci);
1400	unlock_cluster(ci);
1401	return 0;
1402}
1403#endif
1404
1405static int swp_entry_cmp(const void *ent1, const void *ent2)
1406{
1407	const swp_entry_t *e1 = ent1, *e2 = ent2;
1408
1409	return (int)swp_type(*e1) - (int)swp_type(*e2);
1410}
1411
1412void swapcache_free_entries(swp_entry_t *entries, int n)
1413{
1414	struct swap_info_struct *p, *prev;
1415	int i;
1416
1417	if (n <= 0)
1418		return;
1419
1420	prev = NULL;
1421	p = NULL;
1422
1423	/*
1424	 * Sort swap entries by swap device, so each lock is only taken once.
1425	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1426	 * so low that it isn't necessary to optimize further.
1427	 */
1428	if (nr_swapfiles > 1)
1429		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1430	for (i = 0; i < n; ++i) {
1431		p = swap_info_get_cont(entries[i], prev);
1432		if (p)
1433			swap_entry_free(p, entries[i]);
1434		prev = p;
1435	}
1436	if (p)
1437		spin_unlock(&p->lock);
1438}
1439
1440int __swap_count(swp_entry_t entry)
1441{
1442	struct swap_info_struct *si = swp_swap_info(entry);
1443	pgoff_t offset = swp_offset(entry);
1444
1445	return swap_count(si->swap_map[offset]);
1446}
1447
1448/*
1449 * How many references to @entry are currently swapped out?
1450 * This does not give an exact answer when swap count is continued,
1451 * but does include the high COUNT_CONTINUED flag to allow for that.
1452 */
1453int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1454{
1455	pgoff_t offset = swp_offset(entry);
1456	struct swap_cluster_info *ci;
1457	int count;
1458
1459	ci = lock_cluster_or_swap_info(si, offset);
1460	count = swap_count(si->swap_map[offset]);
1461	unlock_cluster_or_swap_info(si, ci);
1462	return count;
1463}
1464
1465/*
1466 * How many references to @entry are currently swapped out?
1467 * This considers COUNT_CONTINUED so it returns exact answer.
1468 */
1469int swp_swapcount(swp_entry_t entry)
1470{
1471	int count, tmp_count, n;
1472	struct swap_info_struct *p;
1473	struct swap_cluster_info *ci;
1474	struct page *page;
1475	pgoff_t offset;
1476	unsigned char *map;
1477
1478	p = _swap_info_get(entry);
1479	if (!p)
1480		return 0;
1481
1482	offset = swp_offset(entry);
1483
1484	ci = lock_cluster_or_swap_info(p, offset);
1485
1486	count = swap_count(p->swap_map[offset]);
1487	if (!(count & COUNT_CONTINUED))
1488		goto out;
1489
1490	count &= ~COUNT_CONTINUED;
1491	n = SWAP_MAP_MAX + 1;
1492
1493	page = vmalloc_to_page(p->swap_map + offset);
1494	offset &= ~PAGE_MASK;
1495	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1496
1497	do {
1498		page = list_next_entry(page, lru);
1499		map = kmap_local_page(page);
1500		tmp_count = map[offset];
1501		kunmap_local(map);
1502
1503		count += (tmp_count & ~COUNT_CONTINUED) * n;
1504		n *= (SWAP_CONT_MAX + 1);
1505	} while (tmp_count & COUNT_CONTINUED);
1506out:
1507	unlock_cluster_or_swap_info(p, ci);
1508	return count;
1509}
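/*
 * Illustrative worked example, not part of this file: with the usual
 * <linux/swap.h> limits (SWAP_MAP_MAX == 0x3e, SWAP_CONT_MAX == 0x7f),
 * a continued entry whose swap_map byte reads SWAP_MAP_MAX | COUNT_CONTINUED
 * and whose first continuation byte is 5 (with no further continuation)
 * yields count = 0x3e + 5 * (SWAP_MAP_MAX + 1) = 62 + 5 * 63 = 377.
 * Each further continuation page scales its digit by another factor of
 * (SWAP_CONT_MAX + 1) == 128.
 */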
1510
1511static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1512					 swp_entry_t entry)
1513{
1514	struct swap_cluster_info *ci;
1515	unsigned char *map = si->swap_map;
1516	unsigned long roffset = swp_offset(entry);
1517	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1518	int i;
1519	bool ret = false;
1520
1521	ci = lock_cluster_or_swap_info(si, offset);
1522	if (!ci || !cluster_is_huge(ci)) {
1523		if (swap_count(map[roffset]))
1524			ret = true;
1525		goto unlock_out;
1526	}
1527	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1528		if (swap_count(map[offset + i])) {
1529			ret = true;
1530			break;
1531		}
1532	}
1533unlock_out:
1534	unlock_cluster_or_swap_info(si, ci);
1535	return ret;
1536}
1537
1538static bool folio_swapped(struct folio *folio)
1539{
1540	swp_entry_t entry = folio->swap;
1541	struct swap_info_struct *si = _swap_info_get(entry);
1542
1543	if (!si)
1544		return false;
1545
1546	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1547		return swap_swapcount(si, entry) != 0;
1548
1549	return swap_page_trans_huge_swapped(si, entry);
1550}
1551
1552/**
1553 * folio_free_swap() - Free the swap space used for this folio.
1554 * @folio: The folio to remove.
1555 *
1556 * If swap is getting full, or if there are no more mappings of this folio,
1557 * then call folio_free_swap to free its swap space.
1558 *
1559 * Return: true if we were able to release the swap space.
1560 */
1561bool folio_free_swap(struct folio *folio)
1562{
1563	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1564
1565	if (!folio_test_swapcache(folio))
1566		return false;
1567	if (folio_test_writeback(folio))
1568		return false;
1569	if (folio_swapped(folio))
1570		return false;
1571
1572	/*
1573	 * Once hibernation has begun to create its image of memory,
1574	 * there's a danger that one of the calls to folio_free_swap()
1575	 * - most probably a call from __try_to_reclaim_swap() while
1576	 * hibernation is allocating its own swap pages for the image,
1577	 * but conceivably even a call from memory reclaim - will free
1578	 * the swap from a folio which has already been recorded in the
1579	 * image as a clean swapcache folio, and then reuse its swap for
1580	 * another page of the image.  On waking from hibernation, the
1581	 * original folio might be freed under memory pressure, then
1582	 * later read back in from swap, now with the wrong data.
1583	 *
1584	 * Hibernation suspends storage while it is writing the image
1585	 * to disk so check that here.
1586	 */
1587	if (pm_suspended_storage())
1588		return false;
1589
1590	delete_from_swap_cache(folio);
1591	folio_set_dirty(folio);
1592	return true;
1593}
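/*
 * Illustrative aside, not part of this file: a minimal sketch of the
 * calling convention, mirroring the pattern used by unuse_pte_range()
 * further down.  try_drop_swap_sketch() is a hypothetical name.
 */
static void try_drop_swap_sketch(struct folio *folio)
{
	folio_lock(folio);		/* folio_free_swap() requires the folio lock */
	folio_wait_writeback(folio);
	folio_free_swap(folio);		/* false if still swapped or storage suspended */
	folio_unlock(folio);
}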
1594
1595/*
1596 * Free the swap entry like above, but also try to
1597 * free the page cache entry if it is the last user.
1598 */
1599int free_swap_and_cache(swp_entry_t entry)
1600{
1601	struct swap_info_struct *p;
1602	unsigned char count;
1603
1604	if (non_swap_entry(entry))
1605		return 1;
1606
1607	p = _swap_info_get(entry);
1608	if (p) {
1609		count = __swap_entry_free(p, entry);
1610		if (count == SWAP_HAS_CACHE &&
1611		    !swap_page_trans_huge_swapped(p, entry))
1612			__try_to_reclaim_swap(p, swp_offset(entry),
1613					      TTRS_UNMAPPED | TTRS_FULL);
1614	}
1615	return p != NULL;
1616}
1617
1618#ifdef CONFIG_HIBERNATION
1619
1620swp_entry_t get_swap_page_of_type(int type)
1621{
1622	struct swap_info_struct *si = swap_type_to_swap_info(type);
1623	swp_entry_t entry = {0};
1624
1625	if (!si)
1626		goto fail;
1627
1628	/* This is called for allocating swap entry, not cache */
1629	spin_lock(&si->lock);
1630	if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1631		atomic_long_dec(&nr_swap_pages);
1632	spin_unlock(&si->lock);
1633fail:
1634	return entry;
1635}
1636
1637/*
1638 * Find the swap type that corresponds to given device (if any).
1639 *
1640 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1641 * from 0, in which the swap header is expected to be located.
1642 *
1643 * This is needed for the suspend to disk (aka swsusp).
1644 */
1645int swap_type_of(dev_t device, sector_t offset)
1646{
1647	int type;
1648
1649	if (!device)
1650		return -1;
1651
1652	spin_lock(&swap_lock);
1653	for (type = 0; type < nr_swapfiles; type++) {
1654		struct swap_info_struct *sis = swap_info[type];
1655
1656		if (!(sis->flags & SWP_WRITEOK))
1657			continue;
1658
1659		if (device == sis->bdev->bd_dev) {
1660			struct swap_extent *se = first_se(sis);
1661
1662			if (se->start_block == offset) {
1663				spin_unlock(&swap_lock);
1664				return type;
1665			}
1666		}
1667	}
1668	spin_unlock(&swap_lock);
1669	return -ENODEV;
1670}
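/*
 * Illustrative aside, not part of this file: a hedged sketch of how
 * hibernation code might map a user-supplied resume device to a swap type.
 * resume_dev, resume_block and the helper name are hypothetical; only
 * swap_type_of() above is taken from this file.
 */
static int resolve_resume_swap_sketch(dev_t resume_dev, sector_t resume_block)
{
	int type = swap_type_of(resume_dev, resume_block);

	if (type < 0)
		return -ENODEV;	/* no writable swap area starts there */
	return type;		/* usable with swapdev_block(type, ...) below */
}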
1671
1672int find_first_swap(dev_t *device)
1673{
1674	int type;
1675
1676	spin_lock(&swap_lock);
1677	for (type = 0; type < nr_swapfiles; type++) {
1678		struct swap_info_struct *sis = swap_info[type];
1679
1680		if (!(sis->flags & SWP_WRITEOK))
1681			continue;
1682		*device = sis->bdev->bd_dev;
1683		spin_unlock(&swap_lock);
1684		return type;
1685	}
1686	spin_unlock(&swap_lock);
1687	return -ENODEV;
1688}
1689
1690/*
1691 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1692 * corresponding to given index in swap_info (swap type).
1693 */
1694sector_t swapdev_block(int type, pgoff_t offset)
1695{
1696	struct swap_info_struct *si = swap_type_to_swap_info(type);
1697	struct swap_extent *se;
1698
1699	if (!si || !(si->flags & SWP_WRITEOK))
1700		return 0;
1701	se = offset_to_swap_extent(si, offset);
1702	return se->start_block + (offset - se->start_page);
1703}
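/*
 * Illustrative worked example, not part of this file (values made up):
 * if the extent covering @offset has start_page == 0, nr_pages == 2048
 * and start_block == 10240, then swapdev_block(type, 100) returns
 * 10240 + (100 - 0) = 10340, i.e. the 10340th PAGE_SIZE block of the
 * underlying device.
 */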
1704
1705/*
1706 * Return either the total number of swap pages of given type, or the number
1707 * of free pages of that type (depending on @free)
1708 *
1709 * This is needed for software suspend
1710 */
1711unsigned int count_swap_pages(int type, int free)
1712{
1713	unsigned int n = 0;
1714
1715	spin_lock(&swap_lock);
1716	if ((unsigned int)type < nr_swapfiles) {
1717		struct swap_info_struct *sis = swap_info[type];
1718
1719		spin_lock(&sis->lock);
1720		if (sis->flags & SWP_WRITEOK) {
1721			n = sis->pages;
1722			if (free)
1723				n -= sis->inuse_pages;
1724		}
1725		spin_unlock(&sis->lock);
1726	}
1727	spin_unlock(&swap_lock);
1728	return n;
1729}
1730#endif /* CONFIG_HIBERNATION */
1731
1732static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1733{
1734	return pte_same(pte_swp_clear_flags(pte), swp_pte);
1735}
1736
1737/*
1738 * No need to decide whether this PTE shares the swap entry with others,
1739 * just let do_wp_page work it out if a write is requested later - to
1740 * force COW, vm_page_prot omits write permission from any private vma.
1741 */
1742static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1743		unsigned long addr, swp_entry_t entry, struct folio *folio)
1744{
1745	struct page *page;
1746	struct folio *swapcache;
1747	spinlock_t *ptl;
1748	pte_t *pte, new_pte, old_pte;
1749	bool hwpoisoned = false;
1750	int ret = 1;
1751
1752	swapcache = folio;
1753	folio = ksm_might_need_to_copy(folio, vma, addr);
1754	if (unlikely(!folio))
1755		return -ENOMEM;
1756	else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
1757		hwpoisoned = true;
1758		folio = swapcache;
1759	}
1760
1761	page = folio_file_page(folio, swp_offset(entry));
1762	if (PageHWPoison(page))
1763		hwpoisoned = true;
1764
1765	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1766	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
1767						swp_entry_to_pte(entry)))) {
1768		ret = 0;
1769		goto out;
1770	}
1771
1772	old_pte = ptep_get(pte);
1773
1774	if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
1775		swp_entry_t swp_entry;
1776
1777		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1778		if (hwpoisoned) {
1779			swp_entry = make_hwpoison_entry(page);
1780		} else {
1781			swp_entry = make_poisoned_swp_entry();
1782		}
1783		new_pte = swp_entry_to_pte(swp_entry);
1784		ret = 0;
1785		goto setpte;
1786	}
1787
1788	/*
1789	 * Some architectures may have to restore extra metadata to the page
1790	 * when reading from swap. This metadata may be indexed by swap entry
1791	 * so this must be called before swap_free().
1792	 */
1793	arch_swap_restore(entry, folio);
1794
1795	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1796	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1797	folio_get(folio);
1798	if (folio == swapcache) {
1799		rmap_t rmap_flags = RMAP_NONE;
1800
1801		/*
1802		 * See do_swap_page(): writeback would be problematic.
1803		 * However, we do a folio_wait_writeback() just before this
1804		 * call and have the folio locked.
1805		 */
1806		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
1807		if (pte_swp_exclusive(old_pte))
1808			rmap_flags |= RMAP_EXCLUSIVE;
1809
1810		folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
1811	} else { /* ksm created a completely new copy */
1812		folio_add_new_anon_rmap(folio, vma, addr);
1813		folio_add_lru_vma(folio, vma);
1814	}
1815	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
1816	if (pte_swp_soft_dirty(old_pte))
1817		new_pte = pte_mksoft_dirty(new_pte);
1818	if (pte_swp_uffd_wp(old_pte))
1819		new_pte = pte_mkuffd_wp(new_pte);
1820setpte:
1821	set_pte_at(vma->vm_mm, addr, pte, new_pte);
1822	swap_free(entry);
1823out:
1824	if (pte)
1825		pte_unmap_unlock(pte, ptl);
1826	if (folio != swapcache) {
1827		folio_unlock(folio);
1828		folio_put(folio);
1829	}
1830	return ret;
1831}
1832
1833static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1834			unsigned long addr, unsigned long end,
1835			unsigned int type)
1836{
1837	pte_t *pte = NULL;
1838	struct swap_info_struct *si;
1839
1840	si = swap_info[type];
1841	do {
1842		struct folio *folio;
1843		unsigned long offset;
1844		unsigned char swp_count;
1845		swp_entry_t entry;
1846		int ret;
1847		pte_t ptent;
1848
1849		if (!pte++) {
1850			pte = pte_offset_map(pmd, addr);
1851			if (!pte)
1852				break;
1853		}
1854
1855		ptent = ptep_get_lockless(pte);
1856
1857		if (!is_swap_pte(ptent))
1858			continue;
1859
1860		entry = pte_to_swp_entry(ptent);
1861		if (swp_type(entry) != type)
1862			continue;
1863
1864		offset = swp_offset(entry);
1865		pte_unmap(pte);
1866		pte = NULL;
1867
1868		folio = swap_cache_get_folio(entry, vma, addr);
1869		if (!folio) {
1870			struct page *page;
1871			struct vm_fault vmf = {
1872				.vma = vma,
1873				.address = addr,
1874				.real_address = addr,
1875				.pmd = pmd,
1876			};
1877
1878			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1879						&vmf);
1880			if (page)
1881				folio = page_folio(page);
1882		}
1883		if (!folio) {
1884			swp_count = READ_ONCE(si->swap_map[offset]);
1885			if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
1886				continue;
1887			return -ENOMEM;
1888		}
1889
1890		folio_lock(folio);
1891		folio_wait_writeback(folio);
1892		ret = unuse_pte(vma, pmd, addr, entry, folio);
1893		if (ret < 0) {
1894			folio_unlock(folio);
1895			folio_put(folio);
1896			return ret;
1897		}
1898
1899		folio_free_swap(folio);
1900		folio_unlock(folio);
1901		folio_put(folio);
1902	} while (addr += PAGE_SIZE, addr != end);
1903
1904	if (pte)
1905		pte_unmap(pte);
1906	return 0;
1907}
1908
1909static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1910				unsigned long addr, unsigned long end,
1911				unsigned int type)
1912{
1913	pmd_t *pmd;
1914	unsigned long next;
1915	int ret;
1916
1917	pmd = pmd_offset(pud, addr);
1918	do {
1919		cond_resched();
1920		next = pmd_addr_end(addr, end);
1921		ret = unuse_pte_range(vma, pmd, addr, next, type);
1922		if (ret)
1923			return ret;
1924	} while (pmd++, addr = next, addr != end);
1925	return 0;
1926}
1927
1928static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
1929				unsigned long addr, unsigned long end,
1930				unsigned int type)
1931{
1932	pud_t *pud;
1933	unsigned long next;
1934	int ret;
1935
1936	pud = pud_offset(p4d, addr);
1937	do {
1938		next = pud_addr_end(addr, end);
1939		if (pud_none_or_clear_bad(pud))
1940			continue;
1941		ret = unuse_pmd_range(vma, pud, addr, next, type);
1942		if (ret)
1943			return ret;
1944	} while (pud++, addr = next, addr != end);
1945	return 0;
1946}
1947
1948static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
1949				unsigned long addr, unsigned long end,
1950				unsigned int type)
1951{
1952	p4d_t *p4d;
1953	unsigned long next;
1954	int ret;
1955
1956	p4d = p4d_offset(pgd, addr);
1957	do {
1958		next = p4d_addr_end(addr, end);
1959		if (p4d_none_or_clear_bad(p4d))
1960			continue;
1961		ret = unuse_pud_range(vma, p4d, addr, next, type);
1962		if (ret)
1963			return ret;
1964	} while (p4d++, addr = next, addr != end);
1965	return 0;
1966}
1967
1968static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
1969{
1970	pgd_t *pgd;
1971	unsigned long addr, end, next;
1972	int ret;
1973
1974	addr = vma->vm_start;
1975	end = vma->vm_end;
1976
1977	pgd = pgd_offset(vma->vm_mm, addr);
1978	do {
1979		next = pgd_addr_end(addr, end);
1980		if (pgd_none_or_clear_bad(pgd))
1981			continue;
1982		ret = unuse_p4d_range(vma, pgd, addr, next, type);
1983		if (ret)
1984			return ret;
1985	} while (pgd++, addr = next, addr != end);
1986	return 0;
1987}
1988
1989static int unuse_mm(struct mm_struct *mm, unsigned int type)
1990{
1991	struct vm_area_struct *vma;
1992	int ret = 0;
1993	VMA_ITERATOR(vmi, mm, 0);
1994
1995	mmap_read_lock(mm);
1996	for_each_vma(vmi, vma) {
1997		if (vma->anon_vma) {
1998			ret = unuse_vma(vma, type);
1999			if (ret)
2000				break;
2001		}
2002
2003		cond_resched();
2004	}
2005	mmap_read_unlock(mm);
2006	return ret;
2007}
2008
2009/*
2010 * Scan swap_map from current position to next entry still in use.
 2011	 * Return 0 if there are no in-use entries after prev up to the end
 2012	 * of the map.
2013 */
2014static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2015					unsigned int prev)
2016{
2017	unsigned int i;
2018	unsigned char count;
2019
2020	/*
2021	 * No need for swap_lock here: we're just looking
2022	 * for whether an entry is in use, not modifying it; false
2023	 * hits are okay, and sys_swapoff() has already prevented new
2024	 * allocations from this area (while holding swap_lock).
2025	 */
2026	for (i = prev + 1; i < si->max; i++) {
2027		count = READ_ONCE(si->swap_map[i]);
2028		if (count && swap_count(count) != SWAP_MAP_BAD)
2029			break;
2030		if ((i % LATENCY_LIMIT) == 0)
2031			cond_resched();
2032	}
2033
2034	if (i == si->max)
2035		i = 0;
2036
2037	return i;
2038}
2039
2040static int try_to_unuse(unsigned int type)
2041{
2042	struct mm_struct *prev_mm;
2043	struct mm_struct *mm;
2044	struct list_head *p;
2045	int retval = 0;
2046	struct swap_info_struct *si = swap_info[type];
2047	struct folio *folio;
2048	swp_entry_t entry;
2049	unsigned int i;
2050
2051	if (!READ_ONCE(si->inuse_pages))
2052		return 0;
2053
2054retry:
2055	retval = shmem_unuse(type);
2056	if (retval)
2057		return retval;
2058
2059	prev_mm = &init_mm;
2060	mmget(prev_mm);
2061
2062	spin_lock(&mmlist_lock);
2063	p = &init_mm.mmlist;
2064	while (READ_ONCE(si->inuse_pages) &&
2065	       !signal_pending(current) &&
2066	       (p = p->next) != &init_mm.mmlist) {
2067
2068		mm = list_entry(p, struct mm_struct, mmlist);
2069		if (!mmget_not_zero(mm))
2070			continue;
2071		spin_unlock(&mmlist_lock);
2072		mmput(prev_mm);
2073		prev_mm = mm;
2074		retval = unuse_mm(mm, type);
2075		if (retval) {
2076			mmput(prev_mm);
2077			return retval;
2078		}
2079
2080		/*
2081		 * Make sure that we aren't completely killing
2082		 * interactive performance.
2083		 */
2084		cond_resched();
2085		spin_lock(&mmlist_lock);
2086	}
2087	spin_unlock(&mmlist_lock);
2088
2089	mmput(prev_mm);
2090
2091	i = 0;
2092	while (READ_ONCE(si->inuse_pages) &&
2093	       !signal_pending(current) &&
2094	       (i = find_next_to_unuse(si, i)) != 0) {
2095
2096		entry = swp_entry(type, i);
2097		folio = filemap_get_folio(swap_address_space(entry), i);
2098		if (IS_ERR(folio))
2099			continue;
2100
2101		/*
2102		 * It is conceivable that a racing task removed this folio from
2103		 * swap cache just before we acquired the page lock. The folio
2104		 * might even be back in swap cache on another swap area. But
2105		 * that is okay, folio_free_swap() only removes stale folios.
2106		 */
2107		folio_lock(folio);
2108		folio_wait_writeback(folio);
2109		folio_free_swap(folio);
2110		folio_unlock(folio);
2111		folio_put(folio);
2112	}
2113
2114	/*
 2115	 * Let's check again to see if there are still swap entries in the map.
 2116	 * If yes, we need to retry the unuse logic again.
2117	 * Under global memory pressure, swap entries can be reinserted back
2118	 * into process space after the mmlist loop above passes over them.
2119	 *
2120	 * Limit the number of retries? No: when mmget_not_zero()
2121	 * above fails, that mm is likely to be freeing swap from
2122	 * exit_mmap(), which proceeds at its own independent pace;
2123	 * and even shmem_writepage() could have been preempted after
2124	 * folio_alloc_swap(), temporarily hiding that swap.  It's easy
2125	 * and robust (though cpu-intensive) just to keep retrying.
2126	 */
2127	if (READ_ONCE(si->inuse_pages)) {
2128		if (!signal_pending(current))
2129			goto retry;
2130		return -EINTR;
2131	}
2132
2133	return 0;
2134}
2135
2136/*
2137 * After a successful try_to_unuse, if no swap is now in use, we know
2138 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2139 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2140 * added to the mmlist just after page_duplicate - before would be racy.
2141 */
2142static void drain_mmlist(void)
2143{
2144	struct list_head *p, *next;
2145	unsigned int type;
2146
2147	for (type = 0; type < nr_swapfiles; type++)
2148		if (swap_info[type]->inuse_pages)
2149			return;
2150	spin_lock(&mmlist_lock);
2151	list_for_each_safe(p, next, &init_mm.mmlist)
2152		list_del_init(p);
2153	spin_unlock(&mmlist_lock);
2154}
2155
2156/*
2157 * Free all of a swapdev's extent information
2158 */
2159static void destroy_swap_extents(struct swap_info_struct *sis)
2160{
2161	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2162		struct rb_node *rb = sis->swap_extent_root.rb_node;
2163		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2164
2165		rb_erase(rb, &sis->swap_extent_root);
2166		kfree(se);
2167	}
2168
2169	if (sis->flags & SWP_ACTIVATED) {
2170		struct file *swap_file = sis->swap_file;
2171		struct address_space *mapping = swap_file->f_mapping;
2172
2173		sis->flags &= ~SWP_ACTIVATED;
2174		if (mapping->a_ops->swap_deactivate)
2175			mapping->a_ops->swap_deactivate(swap_file);
2176	}
2177}
2178
2179/*
2180 * Add a block range (and the corresponding page range) into this swapdev's
2181 * extent tree.
2182 *
2183 * This function rather assumes that it is called in ascending page order.
2184 */
2185int
2186add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2187		unsigned long nr_pages, sector_t start_block)
2188{
2189	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2190	struct swap_extent *se;
2191	struct swap_extent *new_se;
2192
2193	/*
 2194	 * place the new node at the rightmost position, since the
 2195	 * function is called in ascending page order.
2196	 */
2197	while (*link) {
2198		parent = *link;
2199		link = &parent->rb_right;
2200	}
2201
2202	if (parent) {
2203		se = rb_entry(parent, struct swap_extent, rb_node);
2204		BUG_ON(se->start_page + se->nr_pages != start_page);
2205		if (se->start_block + se->nr_pages == start_block) {
2206			/* Merge it */
2207			se->nr_pages += nr_pages;
2208			return 0;
2209		}
2210	}
2211
2212	/* No merge, insert a new extent. */
2213	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2214	if (new_se == NULL)
2215		return -ENOMEM;
2216	new_se->start_page = start_page;
2217	new_se->nr_pages = nr_pages;
2218	new_se->start_block = start_block;
2219
2220	rb_link_node(&new_se->rb_node, parent, link);
2221	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2222	return 1;
2223}
2224EXPORT_SYMBOL_GPL(add_swap_extent);
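/*
 * Illustrative aside, not part of this file: how the merge rule above
 * behaves when extents are added in ascending page order (values made up).
 */
static void add_extents_sketch(struct swap_info_struct *sis)
{
	add_swap_extent(sis, 0, 256, 10240);	/* first extent: pages 0..255         */
	add_swap_extent(sis, 256, 256, 10496);	/* disk-contiguous: merged, 512 pages */
	add_swap_extent(sis, 512, 128, 30000);	/* gap on disk: new rb-tree node      */
}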
2225
2226/*
2227 * A `swap extent' is a simple thing which maps a contiguous range of pages
2228 * onto a contiguous range of disk blocks.  A rbtree of swap extents is
2229 * built at swapon time and is then used at swap_writepage/swap_read_folio
2230 * time for locating where on disk a page belongs.
2231 *
2232 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2233 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2234 * swap files identically.
2235 *
2236 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2237 * extent rbtree operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2238 * swapfiles are handled *identically* after swapon time.
2239 *
2240 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2241 * and will parse them into a rbtree, in PAGE_SIZE chunks.  If some stray
2242 * blocks are found which do not fall within the PAGE_SIZE alignment
2243 * requirements, they are simply tossed out - we will never use those blocks
2244 * for swapping.
2245 *
2246 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2247 * prevents users from writing to the swap device, which will corrupt memory.
2248 *
2249 * The amount of disk space which a single swap extent represents varies.
2250 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2251 * extents in the rbtree. - akpm.
2252 */
2253static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2254{
2255	struct file *swap_file = sis->swap_file;
2256	struct address_space *mapping = swap_file->f_mapping;
2257	struct inode *inode = mapping->host;
2258	int ret;
2259
2260	if (S_ISBLK(inode->i_mode)) {
2261		ret = add_swap_extent(sis, 0, sis->max, 0);
2262		*span = sis->pages;
2263		return ret;
2264	}
2265
2266	if (mapping->a_ops->swap_activate) {
2267		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2268		if (ret < 0)
2269			return ret;
2270		sis->flags |= SWP_ACTIVATED;
2271		if ((sis->flags & SWP_FS_OPS) &&
2272		    sio_pool_init() != 0) {
2273			destroy_swap_extents(sis);
2274			return -ENOMEM;
2275		}
2276		return ret;
2277	}
2278
2279	return generic_swapfile_activate(sis, swap_file, span);
2280}
2281
2282static int swap_node(struct swap_info_struct *p)
2283{
2284	struct block_device *bdev;
2285
2286	if (p->bdev)
2287		bdev = p->bdev;
2288	else
2289		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2290
2291	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2292}
2293
2294static void setup_swap_info(struct swap_info_struct *p, int prio,
2295			    unsigned char *swap_map,
2296			    struct swap_cluster_info *cluster_info)
2297{
2298	int i;
2299
2300	if (prio >= 0)
2301		p->prio = prio;
2302	else
2303		p->prio = --least_priority;
2304	/*
2305	 * the plist prio is negated because plist ordering is
2306	 * low-to-high, while swap ordering is high-to-low
2307	 */
2308	p->list.prio = -p->prio;
2309	for_each_node(i) {
2310		if (p->prio >= 0)
2311			p->avail_lists[i].prio = -p->prio;
2312		else {
2313			if (swap_node(p) == i)
2314				p->avail_lists[i].prio = 1;
2315			else
2316				p->avail_lists[i].prio = -p->prio;
2317		}
2318	}
2319	p->swap_map = swap_map;
2320	p->cluster_info = cluster_info;
2321}
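/*
 * Illustrative worked example, not part of this file: a swap area enabled
 * with prio 5 gets list.prio == -5 and therefore sorts ahead of a prio 1
 * area (list.prio == -1) in the low-to-high plist, so the higher swap
 * priority is tried first.  An auto-assigned priority (say -2) instead gets
 * avail_lists prio 1 on its own NUMA node and 2 elsewhere, so that node
 * prefers its local device.
 */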
2322
2323static void _enable_swap_info(struct swap_info_struct *p)
2324{
2325	p->flags |= SWP_WRITEOK;
2326	atomic_long_add(p->pages, &nr_swap_pages);
2327	total_swap_pages += p->pages;
2328
2329	assert_spin_locked(&swap_lock);
2330	/*
2331	 * both lists are plists, and thus priority ordered.
2332	 * swap_active_head needs to be priority ordered for swapoff(),
2333	 * which on removal of any swap_info_struct with an auto-assigned
2334	 * (i.e. negative) priority increments the auto-assigned priority
2335	 * of any lower-priority swap_info_structs.
2336	 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2337	 * which allocates swap pages from the highest available priority
2338	 * swap_info_struct.
2339	 */
2340	plist_add(&p->list, &swap_active_head);
2341
2342	/* add to available list iff swap device is not full */
2343	if (p->highest_bit)
2344		add_to_avail_list(p);
2345}
2346
2347static void enable_swap_info(struct swap_info_struct *p, int prio,
2348				unsigned char *swap_map,
2349				struct swap_cluster_info *cluster_info)
2350{
2351	zswap_swapon(p->type);
2352
2353	spin_lock(&swap_lock);
2354	spin_lock(&p->lock);
2355	setup_swap_info(p, prio, swap_map, cluster_info);
2356	spin_unlock(&p->lock);
2357	spin_unlock(&swap_lock);
2358	/*
2359	 * Finished initializing swap device, now it's safe to reference it.
2360	 */
2361	percpu_ref_resurrect(&p->users);
2362	spin_lock(&swap_lock);
2363	spin_lock(&p->lock);
2364	_enable_swap_info(p);
2365	spin_unlock(&p->lock);
2366	spin_unlock(&swap_lock);
2367}
2368
2369static void reinsert_swap_info(struct swap_info_struct *p)
2370{
2371	spin_lock(&swap_lock);
2372	spin_lock(&p->lock);
2373	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2374	_enable_swap_info(p);
2375	spin_unlock(&p->lock);
2376	spin_unlock(&swap_lock);
2377}
2378
2379bool has_usable_swap(void)
2380{
2381	bool ret = true;
2382
2383	spin_lock(&swap_lock);
2384	if (plist_head_empty(&swap_active_head))
2385		ret = false;
2386	spin_unlock(&swap_lock);
2387	return ret;
2388}
2389
2390SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2391{
2392	struct swap_info_struct *p = NULL;
2393	unsigned char *swap_map;
2394	struct swap_cluster_info *cluster_info;
2395	struct file *swap_file, *victim;
2396	struct address_space *mapping;
2397	struct inode *inode;
2398	struct filename *pathname;
2399	int err, found = 0;
2400	unsigned int old_block_size;
2401
2402	if (!capable(CAP_SYS_ADMIN))
2403		return -EPERM;
2404
2405	BUG_ON(!current->mm);
2406
2407	pathname = getname(specialfile);
2408	if (IS_ERR(pathname))
2409		return PTR_ERR(pathname);
2410
2411	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2412	err = PTR_ERR(victim);
2413	if (IS_ERR(victim))
2414		goto out;
2415
2416	mapping = victim->f_mapping;
2417	spin_lock(&swap_lock);
2418	plist_for_each_entry(p, &swap_active_head, list) {
2419		if (p->flags & SWP_WRITEOK) {
2420			if (p->swap_file->f_mapping == mapping) {
2421				found = 1;
2422				break;
2423			}
2424		}
2425	}
2426	if (!found) {
2427		err = -EINVAL;
2428		spin_unlock(&swap_lock);
2429		goto out_dput;
2430	}
2431	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2432		vm_unacct_memory(p->pages);
2433	else {
2434		err = -ENOMEM;
2435		spin_unlock(&swap_lock);
2436		goto out_dput;
2437	}
2438	spin_lock(&p->lock);
2439	del_from_avail_list(p);
2440	if (p->prio < 0) {
2441		struct swap_info_struct *si = p;
2442		int nid;
2443
2444		plist_for_each_entry_continue(si, &swap_active_head, list) {
2445			si->prio++;
2446			si->list.prio--;
2447			for_each_node(nid) {
2448				if (si->avail_lists[nid].prio != 1)
2449					si->avail_lists[nid].prio--;
2450			}
2451		}
2452		least_priority++;
2453	}
2454	plist_del(&p->list, &swap_active_head);
2455	atomic_long_sub(p->pages, &nr_swap_pages);
2456	total_swap_pages -= p->pages;
2457	p->flags &= ~SWP_WRITEOK;
2458	spin_unlock(&p->lock);
2459	spin_unlock(&swap_lock);
2460
2461	disable_swap_slots_cache_lock();
2462
2463	set_current_oom_origin();
2464	err = try_to_unuse(p->type);
2465	clear_current_oom_origin();
2466
2467	if (err) {
2468		/* re-insert swap space back into swap_list */
2469		reinsert_swap_info(p);
2470		reenable_swap_slots_cache_unlock();
2471		goto out_dput;
2472	}
2473
2474	reenable_swap_slots_cache_unlock();
2475
2476	/*
2477	 * Wait for swap operations protected by get/put_swap_device()
2478	 * to complete.
2479	 *
2480	 * We need synchronize_rcu() here to protect the accessing to
2481	 * the swap cache data structure.
2482	 */
2483	percpu_ref_kill(&p->users);
2484	synchronize_rcu();
2485	wait_for_completion(&p->comp);
2486
2487	flush_work(&p->discard_work);
2488
2489	destroy_swap_extents(p);
2490	if (p->flags & SWP_CONTINUED)
2491		free_swap_count_continuations(p);
2492
2493	if (!p->bdev || !bdev_nonrot(p->bdev))
2494		atomic_dec(&nr_rotate_swap);
2495
2496	mutex_lock(&swapon_mutex);
2497	spin_lock(&swap_lock);
2498	spin_lock(&p->lock);
2499	drain_mmlist();
2500
2501	/* wait for anyone still in scan_swap_map_slots */
2502	p->highest_bit = 0;		/* cuts scans short */
2503	while (p->flags >= SWP_SCANNING) {
2504		spin_unlock(&p->lock);
2505		spin_unlock(&swap_lock);
2506		schedule_timeout_uninterruptible(1);
2507		spin_lock(&swap_lock);
2508		spin_lock(&p->lock);
2509	}
2510
2511	swap_file = p->swap_file;
2512	old_block_size = p->old_block_size;
2513	p->swap_file = NULL;
2514	p->max = 0;
2515	swap_map = p->swap_map;
2516	p->swap_map = NULL;
2517	cluster_info = p->cluster_info;
2518	p->cluster_info = NULL;
2519	spin_unlock(&p->lock);
2520	spin_unlock(&swap_lock);
2521	arch_swap_invalidate_area(p->type);
2522	zswap_swapoff(p->type);
2523	mutex_unlock(&swapon_mutex);
2524	free_percpu(p->percpu_cluster);
2525	p->percpu_cluster = NULL;
2526	free_percpu(p->cluster_next_cpu);
2527	p->cluster_next_cpu = NULL;
2528	vfree(swap_map);
2529	kvfree(cluster_info);
2530	/* Destroy swap account information */
2531	swap_cgroup_swapoff(p->type);
2532	exit_swap_address_space(p->type);
2533
2534	inode = mapping->host;
2535	if (p->bdev_handle) {
2536		set_blocksize(p->bdev, old_block_size);
2537		bdev_release(p->bdev_handle);
2538		p->bdev_handle = NULL;
2539	}
2540
2541	inode_lock(inode);
2542	inode->i_flags &= ~S_SWAPFILE;
2543	inode_unlock(inode);
2544	filp_close(swap_file, NULL);
2545
2546	/*
2547	 * Clear the SWP_USED flag after all resources are freed so that swapon
2548	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2549	 * not hold p->lock after we cleared its SWP_WRITEOK.
2550	 */
2551	spin_lock(&swap_lock);
2552	p->flags = 0;
2553	spin_unlock(&swap_lock);
2554
2555	err = 0;
2556	atomic_inc(&proc_poll_event);
2557	wake_up_interruptible(&proc_poll_wait);
2558
2559out_dput:
2560	filp_close(victim, NULL);
2561out:
2562	putname(pathname);
2563	return err;
2564}
2565
2566#ifdef CONFIG_PROC_FS
2567static __poll_t swaps_poll(struct file *file, poll_table *wait)
2568{
2569	struct seq_file *seq = file->private_data;
2570
2571	poll_wait(file, &proc_poll_wait, wait);
2572
2573	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2574		seq->poll_event = atomic_read(&proc_poll_event);
2575		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2576	}
2577
2578	return EPOLLIN | EPOLLRDNORM;
2579}
2580
2581/* iterator */
2582static void *swap_start(struct seq_file *swap, loff_t *pos)
2583{
2584	struct swap_info_struct *si;
2585	int type;
2586	loff_t l = *pos;
2587
2588	mutex_lock(&swapon_mutex);
2589
2590	if (!l)
2591		return SEQ_START_TOKEN;
2592
2593	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2594		if (!(si->flags & SWP_USED) || !si->swap_map)
2595			continue;
2596		if (!--l)
2597			return si;
2598	}
2599
2600	return NULL;
2601}
2602
2603static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2604{
2605	struct swap_info_struct *si = v;
2606	int type;
2607
2608	if (v == SEQ_START_TOKEN)
2609		type = 0;
2610	else
2611		type = si->type + 1;
2612
2613	++(*pos);
2614	for (; (si = swap_type_to_swap_info(type)); type++) {
2615		if (!(si->flags & SWP_USED) || !si->swap_map)
2616			continue;
2617		return si;
2618	}
2619
2620	return NULL;
2621}
2622
2623static void swap_stop(struct seq_file *swap, void *v)
2624{
2625	mutex_unlock(&swapon_mutex);
2626}
2627
2628static int swap_show(struct seq_file *swap, void *v)
2629{
2630	struct swap_info_struct *si = v;
2631	struct file *file;
2632	int len;
2633	unsigned long bytes, inuse;
2634
2635	if (si == SEQ_START_TOKEN) {
2636		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2637		return 0;
2638	}
2639
2640	bytes = K(si->pages);
2641	inuse = K(READ_ONCE(si->inuse_pages));
2642
2643	file = si->swap_file;
2644	len = seq_file_path(swap, file, " \t\n\\");
2645	seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2646			len < 40 ? 40 - len : 1, " ",
2647			S_ISBLK(file_inode(file)->i_mode) ?
2648				"partition" : "file\t",
2649			bytes, bytes < 10000000 ? "\t" : "",
2650			inuse, inuse < 10000000 ? "\t" : "",
2651			si->prio);
2652	return 0;
2653}
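/*
 * Illustrative output (values made up), as formatted by the seq_printf()
 * above for one 8 GiB swap partition and one 2 GiB swap file:
 *
 * Filename                                Type            Size            Used            Priority
 * /dev/sda2                               partition       8388604         0               -2
 * /swapfile                               file            2097148         524288          -3
 */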
2654
2655static const struct seq_operations swaps_op = {
2656	.start =	swap_start,
2657	.next =		swap_next,
2658	.stop =		swap_stop,
2659	.show =		swap_show
2660};
2661
2662static int swaps_open(struct inode *inode, struct file *file)
2663{
2664	struct seq_file *seq;
2665	int ret;
2666
2667	ret = seq_open(file, &swaps_op);
2668	if (ret)
2669		return ret;
2670
2671	seq = file->private_data;
2672	seq->poll_event = atomic_read(&proc_poll_event);
2673	return 0;
2674}
2675
2676static const struct proc_ops swaps_proc_ops = {
2677	.proc_flags	= PROC_ENTRY_PERMANENT,
2678	.proc_open	= swaps_open,
2679	.proc_read	= seq_read,
2680	.proc_lseek	= seq_lseek,
2681	.proc_release	= seq_release,
2682	.proc_poll	= swaps_poll,
2683};
2684
2685static int __init procswaps_init(void)
2686{
2687	proc_create("swaps", 0, NULL, &swaps_proc_ops);
2688	return 0;
2689}
2690__initcall(procswaps_init);
2691#endif /* CONFIG_PROC_FS */
2692
2693#ifdef MAX_SWAPFILES_CHECK
2694static int __init max_swapfiles_check(void)
2695{
2696	MAX_SWAPFILES_CHECK();
2697	return 0;
2698}
2699late_initcall(max_swapfiles_check);
2700#endif
2701
2702static struct swap_info_struct *alloc_swap_info(void)
2703{
2704	struct swap_info_struct *p;
2705	struct swap_info_struct *defer = NULL;
2706	unsigned int type;
2707	int i;
2708
2709	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2710	if (!p)
2711		return ERR_PTR(-ENOMEM);
2712
2713	if (percpu_ref_init(&p->users, swap_users_ref_free,
2714			    PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2715		kvfree(p);
2716		return ERR_PTR(-ENOMEM);
2717	}
2718
2719	spin_lock(&swap_lock);
2720	for (type = 0; type < nr_swapfiles; type++) {
2721		if (!(swap_info[type]->flags & SWP_USED))
2722			break;
2723	}
2724	if (type >= MAX_SWAPFILES) {
2725		spin_unlock(&swap_lock);
2726		percpu_ref_exit(&p->users);
2727		kvfree(p);
2728		return ERR_PTR(-EPERM);
2729	}
2730	if (type >= nr_swapfiles) {
2731		p->type = type;
2732		/*
2733		 * Publish the swap_info_struct after initializing it.
2734		 * Note that kvzalloc() above zeroes all its fields.
2735		 */
2736		smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2737		nr_swapfiles++;
2738	} else {
2739		defer = p;
2740		p = swap_info[type];
2741		/*
2742		 * Do not memset this entry: a racing procfs swap_next()
2743		 * would be relying on p->type to remain valid.
2744		 */
2745	}
2746	p->swap_extent_root = RB_ROOT;
2747	plist_node_init(&p->list, 0);
2748	for_each_node(i)
2749		plist_node_init(&p->avail_lists[i], 0);
2750	p->flags = SWP_USED;
2751	spin_unlock(&swap_lock);
2752	if (defer) {
2753		percpu_ref_exit(&defer->users);
2754		kvfree(defer);
2755	}
2756	spin_lock_init(&p->lock);
2757	spin_lock_init(&p->cont_lock);
2758	init_completion(&p->comp);
2759
2760	return p;
2761}
2762
2763static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2764{
2765	int error;
2766
2767	if (S_ISBLK(inode->i_mode)) {
2768		p->bdev_handle = bdev_open_by_dev(inode->i_rdev,
2769				BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL);
2770		if (IS_ERR(p->bdev_handle)) {
2771			error = PTR_ERR(p->bdev_handle);
2772			p->bdev_handle = NULL;
2773			return error;
2774		}
2775		p->bdev = p->bdev_handle->bdev;
2776		p->old_block_size = block_size(p->bdev);
2777		error = set_blocksize(p->bdev, PAGE_SIZE);
2778		if (error < 0)
2779			return error;
2780		/*
2781		 * Zoned block devices contain zones that have a sequential
2782		 * write only restriction.  Hence zoned block devices are not
2783		 * suitable for swapping.  Disallow them here.
2784		 */
2785		if (bdev_is_zoned(p->bdev))
2786			return -EINVAL;
2787		p->flags |= SWP_BLKDEV;
2788	} else if (S_ISREG(inode->i_mode)) {
2789		p->bdev = inode->i_sb->s_bdev;
2790	}
2791
2792	return 0;
2793}
2794
2795
2796/*
2797 * Find out how many pages are allowed for a single swap device. There
2798 * are two limiting factors:
2799 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2800 * 2) the number of bits in the swap pte, as defined by the different
2801 * architectures.
2802 *
2803 * In order to find the largest possible bit mask, a swap entry with
2804 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2805 * decoded to a swp_entry_t again, and finally the swap offset is
2806 * extracted.
2807 *
2808 * This will mask all the bits from the initial ~0UL mask that can't
2809 * be encoded in either the swp_entry_t or the architecture definition
2810 * of a swap pte.
2811 */
2812unsigned long generic_max_swapfile_size(void)
2813{
2814	return swp_offset(pte_to_swp_entry(
2815			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2816}
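/*
 * Illustrative worked example, not part of this file (the bit width is
 * hypothetical): if an architecture's swap pte can encode, say, 50 bits of
 * swap offset, then swp_entry(0, ~0UL) survives the round trip through
 * swp_entry_to_pte()/pte_to_swp_entry() with its offset masked down to
 * 2^50 - 1, and generic_max_swapfile_size() returns 2^50 pages.
 */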
2817
2818/* Can be overridden by an architecture for additional checks. */
2819__weak unsigned long arch_max_swapfile_size(void)
2820{
2821	return generic_max_swapfile_size();
2822}
2823
2824static unsigned long read_swap_header(struct swap_info_struct *p,
2825					union swap_header *swap_header,
2826					struct inode *inode)
2827{
2828	int i;
2829	unsigned long maxpages;
2830	unsigned long swapfilepages;
2831	unsigned long last_page;
2832
2833	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2834		pr_err("Unable to find swap-space signature\n");
2835		return 0;
2836	}
2837
2838	/* swap partition endianness hack... */
2839	if (swab32(swap_header->info.version) == 1) {
2840		swab32s(&swap_header->info.version);
2841		swab32s(&swap_header->info.last_page);
2842		swab32s(&swap_header->info.nr_badpages);
2843		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2844			return 0;
2845		for (i = 0; i < swap_header->info.nr_badpages; i++)
2846			swab32s(&swap_header->info.badpages[i]);
2847	}
2848	/* Check the swap header's sub-version */
2849	if (swap_header->info.version != 1) {
2850		pr_warn("Unable to handle swap header version %d\n",
2851			swap_header->info.version);
2852		return 0;
2853	}
2854
2855	p->lowest_bit  = 1;
2856	p->cluster_next = 1;
2857	p->cluster_nr = 0;
2858
2859	maxpages = swapfile_maximum_size;
2860	last_page = swap_header->info.last_page;
2861	if (!last_page) {
2862		pr_warn("Empty swap-file\n");
2863		return 0;
2864	}
2865	if (last_page > maxpages) {
2866		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2867			K(maxpages), K(last_page));
2868	}
2869	if (maxpages > last_page) {
2870		maxpages = last_page + 1;
2871		/* p->max is an unsigned int: don't overflow it */
2872		if ((unsigned int)maxpages == 0)
2873			maxpages = UINT_MAX;
2874	}
2875	p->highest_bit = maxpages - 1;
2876
2877	if (!maxpages)
2878		return 0;
2879	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2880	if (swapfilepages && maxpages > swapfilepages) {
2881		pr_warn("Swap area shorter than signature indicates\n");
2882		return 0;
2883	}
2884	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2885		return 0;
2886	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2887		return 0;
2888
2889	return maxpages;
2890}
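/*
 * Illustrative worked example, not part of this file: with 4 KiB pages and
 * a header whose info.last_page is 262143 (well below swapfile_maximum_size),
 * the code above sets maxpages = last_page + 1 = 262144 and
 * p->highest_bit = 262143, i.e. a 1 GiB swap area of which one page is
 * consumed by the header itself.
 */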
2891
2892#define SWAP_CLUSTER_INFO_COLS						\
2893	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
2894#define SWAP_CLUSTER_SPACE_COLS						\
2895	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
2896#define SWAP_CLUSTER_COLS						\
2897	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
2898
2899static int setup_swap_map_and_extents(struct swap_info_struct *p,
2900					union swap_header *swap_header,
2901					unsigned char *swap_map,
2902					struct swap_cluster_info *cluster_info,
2903					unsigned long maxpages,
2904					sector_t *span)
2905{
2906	unsigned int j, k;
2907	unsigned int nr_good_pages;
2908	int nr_extents;
2909	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2910	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
2911	unsigned long i, idx;
2912
2913	nr_good_pages = maxpages - 1;	/* omit header page */
2914
2915	cluster_list_init(&p->free_clusters);
2916	cluster_list_init(&p->discard_clusters);
2917
2918	for (i = 0; i < swap_header->info.nr_badpages; i++) {
2919		unsigned int page_nr = swap_header->info.badpages[i];
2920		if (page_nr == 0 || page_nr > swap_header->info.last_page)
2921			return -EINVAL;
2922		if (page_nr < maxpages) {
2923			swap_map[page_nr] = SWAP_MAP_BAD;
2924			nr_good_pages--;
2925			/*
2926			 * Haven't marked the cluster free yet, no list
2927			 * operation involved
2928			 */
2929			inc_cluster_info_page(p, cluster_info, page_nr);
2930		}
2931	}
2932
2933	/* Haven't marked the cluster free yet, no list operation involved */
2934	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2935		inc_cluster_info_page(p, cluster_info, i);
2936
2937	if (nr_good_pages) {
2938		swap_map[0] = SWAP_MAP_BAD;
2939		/*
 2940		 * Haven't marked the cluster free yet, no list
 2941		 * operation involved
2942		 */
2943		inc_cluster_info_page(p, cluster_info, 0);
2944		p->max = maxpages;
2945		p->pages = nr_good_pages;
2946		nr_extents = setup_swap_extents(p, span);
2947		if (nr_extents < 0)
2948			return nr_extents;
2949		nr_good_pages = p->pages;
2950	}
2951	if (!nr_good_pages) {
2952		pr_warn("Empty swap-file\n");
2953		return -EINVAL;
2954	}
2955
2956	if (!cluster_info)
2957		return nr_extents;
2958
2959
2960	/*
 2961	 * Reduce false cache line sharing between cluster_info entries
 2962	 * used by different swap address spaces.
2963	 */
2964	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
2965		j = (k + col) % SWAP_CLUSTER_COLS;
2966		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
2967			idx = i * SWAP_CLUSTER_COLS + j;
2968			if (idx >= nr_clusters)
2969				continue;
2970			if (cluster_count(&cluster_info[idx]))
2971				continue;
2972			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
2973			cluster_list_add_tail(&p->free_clusters, cluster_info,
2974					      idx);
2975		}
2976	}
2977	return nr_extents;
2978}
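/*
 * Illustrative worked example, not part of this file: assuming
 * SWAP_CLUSTER_COLS == 4, 12 clusters and col == 0, the loop above builds
 * the free list in the order 0,4,8, 1,5,9, 2,6,10, 3,7,11 (rotated by col),
 * interleaving the columns of the cluster_info array instead of handing out
 * adjacent entries back to back.
 */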
2979
2980SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2981{
2982	struct swap_info_struct *p;
2983	struct filename *name;
2984	struct file *swap_file = NULL;
2985	struct address_space *mapping;
2986	struct dentry *dentry;
2987	int prio;
2988	int error;
2989	union swap_header *swap_header;
2990	int nr_extents;
2991	sector_t span;
2992	unsigned long maxpages;
2993	unsigned char *swap_map = NULL;
2994	struct swap_cluster_info *cluster_info = NULL;
2995	struct page *page = NULL;
2996	struct inode *inode = NULL;
2997	bool inced_nr_rotate_swap = false;
2998
2999	if (swap_flags & ~SWAP_FLAGS_VALID)
3000		return -EINVAL;
3001
3002	if (!capable(CAP_SYS_ADMIN))
3003		return -EPERM;
3004
3005	if (!swap_avail_heads)
3006		return -ENOMEM;
3007
3008	p = alloc_swap_info();
3009	if (IS_ERR(p))
3010		return PTR_ERR(p);
3011
3012	INIT_WORK(&p->discard_work, swap_discard_work);
3013
3014	name = getname(specialfile);
3015	if (IS_ERR(name)) {
3016		error = PTR_ERR(name);
3017		name = NULL;
3018		goto bad_swap;
3019	}
3020	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3021	if (IS_ERR(swap_file)) {
3022		error = PTR_ERR(swap_file);
3023		swap_file = NULL;
3024		goto bad_swap;
3025	}
3026
3027	p->swap_file = swap_file;
3028	mapping = swap_file->f_mapping;
3029	dentry = swap_file->f_path.dentry;
3030	inode = mapping->host;
3031
3032	error = claim_swapfile(p, inode);
3033	if (unlikely(error))
3034		goto bad_swap;
3035
3036	inode_lock(inode);
3037	if (d_unlinked(dentry) || cant_mount(dentry)) {
3038		error = -ENOENT;
3039		goto bad_swap_unlock_inode;
3040	}
3041	if (IS_SWAPFILE(inode)) {
3042		error = -EBUSY;
3043		goto bad_swap_unlock_inode;
3044	}
3045
3046	/*
3047	 * Read the swap header.
3048	 */
3049	if (!mapping->a_ops->read_folio) {
3050		error = -EINVAL;
3051		goto bad_swap_unlock_inode;
3052	}
3053	page = read_mapping_page(mapping, 0, swap_file);
3054	if (IS_ERR(page)) {
3055		error = PTR_ERR(page);
3056		goto bad_swap_unlock_inode;
3057	}
3058	swap_header = kmap(page);
3059
3060	maxpages = read_swap_header(p, swap_header, inode);
3061	if (unlikely(!maxpages)) {
3062		error = -EINVAL;
3063		goto bad_swap_unlock_inode;
3064	}
3065
3066	/* OK, set up the swap map and apply the bad block list */
3067	swap_map = vzalloc(maxpages);
3068	if (!swap_map) {
3069		error = -ENOMEM;
3070		goto bad_swap_unlock_inode;
3071	}
3072
3073	if (p->bdev && bdev_stable_writes(p->bdev))
3074		p->flags |= SWP_STABLE_WRITES;
3075
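	/*
	 * A synchronous block device lets the fault path read swap pages in
	 * synchronously and bypass the swap cache for singly-mapped pages.
	 */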
3076	if (p->bdev && bdev_synchronous(p->bdev))
3077		p->flags |= SWP_SYNCHRONOUS_IO;
3078
3079	if (p->bdev && bdev_nonrot(p->bdev)) {
3080		int cpu;
3081		unsigned long ci, nr_cluster;
3082
3083		p->flags |= SWP_SOLIDSTATE;
3084		p->cluster_next_cpu = alloc_percpu(unsigned int);
3085		if (!p->cluster_next_cpu) {
3086			error = -ENOMEM;
3087			goto bad_swap_unlock_inode;
3088		}
3089		/*
3090		 * Select a random position to start with, to help with wear
3091		 * levelling on SSDs.
3092		 */
3093		for_each_possible_cpu(cpu) {
3094			per_cpu(*p->cluster_next_cpu, cpu) =
3095				get_random_u32_inclusive(1, p->highest_bit);
3096		}
3097		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3098
3099		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3100					GFP_KERNEL);
3101		if (!cluster_info) {
3102			error = -ENOMEM;
3103			goto bad_swap_unlock_inode;
3104		}
3105
3106		for (ci = 0; ci < nr_cluster; ci++)
3107			spin_lock_init(&((cluster_info + ci)->lock));
3108
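		/*
		 * Give each CPU its own current cluster to allocate from, so
		 * that concurrent allocations need not contend on a single
		 * cluster or on the free cluster list.
		 */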
3109		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3110		if (!p->percpu_cluster) {
3111			error = -ENOMEM;
3112			goto bad_swap_unlock_inode;
3113		}
3114		for_each_possible_cpu(cpu) {
3115			struct percpu_cluster *cluster;
3116			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3117			cluster_set_null(&cluster->index);
3118		}
3119	} else {
3120		atomic_inc(&nr_rotate_swap);
3121		inced_nr_rotate_swap = true;
3122	}
3123
3124	error = swap_cgroup_swapon(p->type, maxpages);
3125	if (error)
3126		goto bad_swap_unlock_inode;
3127
3128	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3129		cluster_info, maxpages, &span);
3130	if (unlikely(nr_extents < 0)) {
3131		error = nr_extents;
3132		goto bad_swap_unlock_inode;
3133	}
3134
3135	if ((swap_flags & SWAP_FLAG_DISCARD) &&
3136	    p->bdev && bdev_max_discard_sectors(p->bdev)) {
3137		/*
3138		 * When discard is enabled for swap with no particular
3139		 * policy flagged, we set all swap discard flags here in
3140		 * order to sustain backward compatibility with older
3141		 * swapon(8) releases.
3142		 */
3143		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3144			     SWP_PAGE_DISCARD);
3145
3146		/*
3147		 * By flagging sys_swapon, a sysadmin can tell us to
3148		 * either do single-time area discards only, or to just
3149		 * perform discards for released swap page-clusters.
3150		 * Now it's time to adjust the p->flags accordingly.
3151		 */
3152		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3153			p->flags &= ~SWP_PAGE_DISCARD;
3154		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3155			p->flags &= ~SWP_AREA_DISCARD;
3156
3157		/* issue a swapon-time discard if it's still required */
3158		if (p->flags & SWP_AREA_DISCARD) {
3159			int err = discard_swap(p);
3160			if (unlikely(err))
3161				pr_err("swapon: discard_swap(%p): %d\n",
3162					p, err);
3163		}
3164	}
3165
3166	error = init_swap_address_space(p->type, maxpages);
3167	if (error)
3168		goto bad_swap_unlock_inode;
3169
3170	/*
3171	 * Flush any pending IO and dirty mappings before we start using this
3172	 * swap device.
3173	 */
3174	inode->i_flags |= S_SWAPFILE;
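	/* S_SWAPFILE is set first so that new writes are refused while we drain */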
3175	error = inode_drain_writes(inode);
3176	if (error) {
3177		inode->i_flags &= ~S_SWAPFILE;
3178		goto free_swap_address_space;
3179	}
3180
3181	mutex_lock(&swapon_mutex);
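	/*
	 * SWAP_FLAG_PREFER means the caller supplied an explicit priority in
	 * SWAP_FLAG_PRIO_MASK; otherwise prio stays -1 and enable_swap_info()
	 * assigns the next default (negative) priority.
	 */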
3182	prio = -1;
3183	if (swap_flags & SWAP_FLAG_PREFER)
3184		prio =
3185		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3186	enable_swap_info(p, prio, swap_map, cluster_info);
3187
3188	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s\n",
3189		K(p->pages), name->name, p->prio, nr_extents,
3190		K((unsigned long long)span),
3191		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3192		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3193		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3194		(p->flags & SWP_PAGE_DISCARD) ? "c" : "");
3195
3196	mutex_unlock(&swapon_mutex);
3197	atomic_inc(&proc_poll_event);
3198	wake_up_interruptible(&proc_poll_wait);
3199
3200	error = 0;
3201	goto out;
3202free_swap_address_space:
3203	exit_swap_address_space(p->type);
3204bad_swap_unlock_inode:
3205	inode_unlock(inode);
3206bad_swap:
3207	free_percpu(p->percpu_cluster);
3208	p->percpu_cluster = NULL;
3209	free_percpu(p->cluster_next_cpu);
3210	p->cluster_next_cpu = NULL;
3211	if (p->bdev_handle) {
3212		set_blocksize(p->bdev, p->old_block_size);
3213		bdev_release(p->bdev_handle);
3214		p->bdev_handle = NULL;
3215	}
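	/* inode is not locked at this point; clear it so out: won't unlock it */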
3216	inode = NULL;
3217	destroy_swap_extents(p);
3218	swap_cgroup_swapoff(p->type);
3219	spin_lock(&swap_lock);
3220	p->swap_file = NULL;
3221	p->flags = 0;
3222	spin_unlock(&swap_lock);
3223	vfree(swap_map);
3224	kvfree(cluster_info);
3225	if (inced_nr_rotate_swap)
3226		atomic_dec(&nr_rotate_swap);
3227	if (swap_file)
3228		filp_close(swap_file, NULL);
3229out:
3230	if (page && !IS_ERR(page)) {
3231		kunmap(page);
3232		put_page(page);
3233	}
3234	if (name)
3235		putname(name);
3236	if (inode)
3237		inode_unlock(inode);
3238	if (!error)
3239		enable_swap_slots_cache();
3240	return error;
3241}
3242
3243void si_swapinfo(struct sysinfo *val)
3244{
3245	unsigned int type;
3246	unsigned long nr_to_be_unused = 0;
3247
3248	spin_lock(&swap_lock);
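	/*
	 * Devices that are SWP_USED but not SWP_WRITEOK are in the middle of
	 * swapon or swapoff; add their in-use pages back into both the free
	 * and total figures reported below.
	 */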
3249	for (type = 0; type < nr_swapfiles; type++) {
3250		struct swap_info_struct *si = swap_info[type];
3251
3252		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3253			nr_to_be_unused += READ_ONCE(si->inuse_pages);
3254	}
3255	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3256	val->totalswap = total_swap_pages + nr_to_be_unused;
3257	spin_unlock(&swap_lock);
3258}
3259
3260/*
3261 * Verify that a swap entry is valid and increment its swap map count.
3262 *
3263 * Returns 0 on success, or an error code in the following cases:
3264 * - swp_entry is invalid -> -EINVAL
3265 * - swp_entry is a migration entry -> -EINVAL
3266 * - a swap-cache reference is requested but there is already one -> -EEXIST
3267 * - a swap-cache reference is requested but the entry is not in use -> -ENOENT
3268 * - a swap-mapped reference is requested but the count needs a continuation
3269 *   page that cannot be allocated -> -ENOMEM
3270 */
3271static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3272{
3273	struct swap_info_struct *p;
3274	struct swap_cluster_info *ci;
3275	unsigned long offset;
3276	unsigned char count;
3277	unsigned char has_cache;
3278	int err;
3279
3280	p = swp_swap_info(entry);
3281
3282	offset = swp_offset(entry);
3283	ci = lock_cluster_or_swap_info(p, offset);
3284
3285	count = p->swap_map[offset];
3286
3287	/*
3288	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3289	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3290	 */
3291	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3292		err = -ENOENT;
3293		goto unlock_out;
3294	}
3295
3296	has_cache = count & SWAP_HAS_CACHE;
3297	count &= ~SWAP_HAS_CACHE;
3298	err = 0;
3299
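	/*
	 * The low bits of a swap_map entry hold the map count (possibly with
	 * COUNT_CONTINUED set); SWAP_HAS_CACHE is a separate bit, so cache
	 * and map references are tracked independently below.
	 */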
3300	if (usage == SWAP_HAS_CACHE) {
3301
3302		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3303		if (!has_cache && count)
3304			has_cache = SWAP_HAS_CACHE;
3305		else if (has_cache)		/* someone else added cache */
3306			err = -EEXIST;
3307		else				/* no users remaining */
3308			err = -ENOENT;
3309
3310	} else if (count || has_cache) {
3311
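		/*
		 * A swap-map reference: bump the count, spilling over into a
		 * continuation page once it would exceed SWAP_MAP_MAX.
		 */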
3312		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3313			count += usage;
3314		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3315			err = -EINVAL;
3316		else if (swap_count_continued(p, offset, count))
3317			count = COUNT_CONTINUED;
3318		else
3319			err = -ENOMEM;
3320	} else
3321		err = -ENOENT;			/* unused swap entry */
3322
3323	WRITE_ONCE(p->swap_map[offset], count | has_cache);
3324
3325unlock_out:
3326	unlock_cluster_or_swap_info(p, ci);
3327	return err;
3328}
3329
3330/*
3331 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3332 * (in which case its reference count is never incremented).
3333 */
3334void swap_shmem_alloc(swp_entry_t entry)
3335{
3336	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3337}
3338
3339/*
3340 * Increase reference count of swap entry by 1.
3341 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3342 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3343 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3344 * might occur if a page table entry has got corrupted.
3345 */
3346int swap_duplicate(swp_entry_t entry)
3347{
3348	int err = 0;
3349
3350	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3351		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3352	return err;
3353}
3354
3355/*
3356 * @entry: swap entry for which we allocate swap cache.
3357 *
3358 * Called when allocating swap cache for an existing swap entry.
3359 * Returns 0 on success, or an error code on failure:
3360 * -EEXIST means a swap cache already exists for this entry.
3361 * Note: the return codes differ from those of swap_duplicate().
3362 */
3363int swapcache_prepare(swp_entry_t entry)
3364{
3365	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3366}
3367
3368void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
3369{
3370	struct swap_cluster_info *ci;
3371	unsigned long offset = swp_offset(entry);
3372	unsigned char usage;
3373
3374	ci = lock_cluster_or_swap_info(si, offset);
3375	usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
3376	unlock_cluster_or_swap_info(si, ci);
3377	if (!usage)
3378		free_swap_slot(entry);
3379}
3380
3381struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3382{
3383	return swap_type_to_swap_info(swp_type(entry));
3384}
3385
3386/*
3387 * out-of-line methods to avoid include hell.
3388 */
3389struct address_space *swapcache_mapping(struct folio *folio)
3390{
3391	return swp_swap_info(folio->swap)->swap_file->f_mapping;
3392}
3393EXPORT_SYMBOL_GPL(swapcache_mapping);
3394
3395pgoff_t __page_file_index(struct page *page)
3396{
3397	swp_entry_t swap = page_swap_entry(page);
3398	return swp_offset(swap);
3399}
3400EXPORT_SYMBOL_GPL(__page_file_index);
3401
3402/*
3403 * add_swap_count_continuation - called when a swap count is duplicated
3404 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3405 * page of the original vmalloc'ed swap_map, to hold the continuation count
3406 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3407 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3408 *
3409 * These continuation pages are seldom referenced: the common paths all work
3410 * on the original swap_map, only referring to a continuation page when the
3411 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3412 *
3413 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3414 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3415 * can be called after dropping locks.
3416 */
3417int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3418{
3419	struct swap_info_struct *si;
3420	struct swap_cluster_info *ci;
3421	struct page *head;
3422	struct page *page;
3423	struct page *list_page;
3424	pgoff_t offset;
3425	unsigned char count;
3426	int ret = 0;
3427
3428	/*
3429	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3430	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3431	 */
3432	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3433
3434	si = get_swap_device(entry);
3435	if (!si) {
3436		/*
3437		 * An acceptable race has occurred since the failing
3438		 * __swap_duplicate(): the swap device may have been swapped off.
3439		 */
3440		goto outer;
3441	}
3442	spin_lock(&si->lock);
3443
3444	offset = swp_offset(entry);
3445
3446	ci = lock_cluster(si, offset);
3447
3448	count = swap_count(si->swap_map[offset]);
3449
3450	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3451		/*
3452		 * The higher the swap count, the more likely it is that tasks
3453		 * will race to add swap count continuation: we need to avoid
3454		 * over-provisioning.
3455		 */
3456		goto out;
3457	}
3458
3459	if (!page) {
3460		ret = -ENOMEM;
3461		goto out;
3462	}
3463
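	/*
	 * Continuation pages are chained on the lru list of the vmalloc'ed
	 * swap_map page covering this entry; the byte offset within that page
	 * indexes this entry's continuation count on every page in the chain.
	 */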
3464	head = vmalloc_to_page(si->swap_map + offset);
3465	offset &= ~PAGE_MASK;
3466
3467	spin_lock(&si->cont_lock);
3468	/*
3469	 * Page allocation does not initialize the page's lru field,
3470	 * but it does always reset its private field.
3471	 */
3472	if (!page_private(head)) {
3473		BUG_ON(count & COUNT_CONTINUED);
3474		INIT_LIST_HEAD(&head->lru);
3475		set_page_private(head, SWP_CONTINUED);
3476		si->flags |= SWP_CONTINUED;
3477	}
3478
3479	list_for_each_entry(list_page, &head->lru, lru) {
3480		unsigned char *map;
3481
3482		/*
3483		 * If the previous map said no continuation, but we've found
3484		 * a continuation page, free our allocation and use this one.
3485		 */
3486		if (!(count & COUNT_CONTINUED))
3487			goto out_unlock_cont;
3488
3489		map = kmap_local_page(list_page) + offset;
3490		count = *map;
3491		kunmap_local(map);
3492
3493		/*
3494		 * If this continuation count now has some space in it,
3495		 * free our allocation and use this one.
3496		 */
3497		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3498			goto out_unlock_cont;
3499	}
3500
3501	list_add_tail(&page->lru, &head->lru);
3502	page = NULL;			/* now it's attached, don't free it */
3503out_unlock_cont:
3504	spin_unlock(&si->cont_lock);
3505out:
3506	unlock_cluster(ci);
3507	spin_unlock(&si->lock);
3508	put_swap_device(si);
3509outer:
3510	if (page)
3511		__free_page(page);
3512	return ret;
3513}
3514
3515/*
3516 * swap_count_continued - when the original swap_map count is incremented
3517 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3518 * into, carry if so, or else fail until a new continuation page is allocated;
3519 * when the original swap_map count is decremented from 0 with continuation,
3520 * borrow from the continuation and report whether it still holds more.
3521 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3522 * lock.
3523 */
3524static bool swap_count_continued(struct swap_info_struct *si,
3525				 pgoff_t offset, unsigned char count)
3526{
3527	struct page *head;
3528	struct page *page;
3529	unsigned char *map;
3530	bool ret;
3531
3532	head = vmalloc_to_page(si->swap_map + offset);
3533	if (page_private(head) != SWP_CONTINUED) {
3534		BUG_ON(count & COUNT_CONTINUED);
3535		return false;		/* need to add count continuation */
3536	}
3537
3538	spin_lock(&si->cont_lock);
3539	offset &= ~PAGE_MASK;
3540	page = list_next_entry(head, lru);
3541	map = kmap_local_page(page) + offset;
3542
3543	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
3544		goto init_map;		/* jump over SWAP_CONT_MAX checks */
3545
3546	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3547		/*
3548		 * Think of how you add 1 to 999
3549		 */
3550		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3551			kunmap_local(map);
3552			page = list_next_entry(page, lru);
3553			BUG_ON(page == head);
3554			map = kmap_local_page(page) + offset;
3555		}
3556		if (*map == SWAP_CONT_MAX) {
3557			kunmap_local(map);
3558			page = list_next_entry(page, lru);
3559			if (page == head) {
3560				ret = false;	/* add count continuation */
3561				goto out;
3562			}
3563			map = kmap_local_page(page) + offset;
3564init_map:		*map = 0;		/* we didn't zero the page */
3565		}
3566		*map += 1;
3567		kunmap_local(map);
3568		while ((page = list_prev_entry(page, lru)) != head) {
3569			map = kmap_local_page(page) + offset;
3570			*map = COUNT_CONTINUED;
3571			kunmap_local(map);
3572		}
3573		ret = true;			/* incremented */
3574
3575	} else {				/* decrementing */
3576		/*
3577		 * Think of how you subtract 1 from 1000
3578		 */
3579		BUG_ON(count != COUNT_CONTINUED);
3580		while (*map == COUNT_CONTINUED) {
3581			kunmap_local(map);
3582			page = list_next_entry(page, lru);
3583			BUG_ON(page == head);
3584			map = kmap_local_page(page) + offset;
3585		}
3586		BUG_ON(*map == 0);
3587		*map -= 1;
3588		if (*map == 0)
3589			count = 0;
3590		kunmap_local(map);
3591		while ((page = list_prev_entry(page, lru)) != head) {
3592			map = kmap_local_page(page) + offset;
3593			*map = SWAP_CONT_MAX | count;
3594			count = COUNT_CONTINUED;
3595			kunmap_local(map);
3596		}
3597		ret = count == COUNT_CONTINUED;
3598	}
3599out:
3600	spin_unlock(&si->cont_lock);
3601	return ret;
3602}
3603
3604/*
3605 * free_swap_count_continuations - called at swapoff to free all continuation
3606 * pages appended to the swap_map, once it is quiesced and before vfree'ing it.
3607 */
3608static void free_swap_count_continuations(struct swap_info_struct *si)
3609{
3610	pgoff_t offset;
3611
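	/*
	 * swap_map is vmalloc'ed, so advancing by PAGE_SIZE bytes visits each
	 * of its pages once; each page may carry its own continuation chain.
	 */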
3612	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3613		struct page *head;
3614		head = vmalloc_to_page(si->swap_map + offset);
3615		if (page_private(head)) {
3616			struct page *page, *next;
3617
3618			list_for_each_entry_safe(page, next, &head->lru, lru) {
3619				list_del(&page->lru);
3620				__free_page(page);
3621			}
3622		}
3623	}
3624}
3625
3626#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3627void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
3628{
3629	struct swap_info_struct *si, *next;
3630	int nid = folio_nid(folio);
3631
3632	if (!(gfp & __GFP_IO))
3633		return;
3634
3635	if (!blk_cgroup_congested())
3636		return;
3637
3638	/*
3639	 * We've already scheduled a throttle, avoid taking the global swap
3640	 * lock.
3641	 */
3642	if (current->throttle_disk)
3643		return;
3644
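	/*
	 * Throttle against the first block-backed device on this node's
	 * avail list, i.e. the highest-priority target for new swap-out.
	 */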
3645	spin_lock(&swap_avail_lock);
3646	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3647				  avail_lists[nid]) {
3648		if (si->bdev) {
3649			blkcg_schedule_throttle(si->bdev->bd_disk, true);
3650			break;
3651		}
3652	}
3653	spin_unlock(&swap_avail_lock);
3654}
3655#endif
3656
3657static int __init swapfile_init(void)
3658{
3659	int nid;
3660
3661	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3662					 GFP_KERNEL);
3663	if (!swap_avail_heads) {
3664		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3665		return -ENOMEM;
3666	}
3667
3668	for_each_node(nid)
3669		plist_head_init(&swap_avail_heads[nid]);
3670
3671	swapfile_maximum_size = arch_max_swapfile_size();
3672
3673#ifdef CONFIG_MIGRATION
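	/*
	 * Swap migration entries can only carry the young and dirty bits if
	 * the largest possible swap offset still fits in the remaining bits.
	 */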
3674	if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
3675		swap_migration_ad_supported = true;
3676#endif	/* CONFIG_MIGRATION */
3677
3678	return 0;
3679}
3680subsys_initcall(swapfile_init);