mm/swapfile.c (Linux v3.1)
   1/*
   2 *  linux/mm/swapfile.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *  Swap reorganised 29.12.95, Stephen Tweedie
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/hugetlb.h>
  10#include <linux/mman.h>
  11#include <linux/slab.h>
  12#include <linux/kernel_stat.h>
  13#include <linux/swap.h>
  14#include <linux/vmalloc.h>
  15#include <linux/pagemap.h>
  16#include <linux/namei.h>
  17#include <linux/shmem_fs.h>
  18#include <linux/blkdev.h>
  19#include <linux/random.h>
  20#include <linux/writeback.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/init.h>
  24#include <linux/module.h>
  25#include <linux/ksm.h>
  26#include <linux/rmap.h>
  27#include <linux/security.h>
  28#include <linux/backing-dev.h>
  29#include <linux/mutex.h>
  30#include <linux/capability.h>
  31#include <linux/syscalls.h>
  32#include <linux/memcontrol.h>
  33#include <linux/poll.h>
  34#include <linux/oom.h>
  35
  36#include <asm/pgtable.h>
  37#include <asm/tlbflush.h>
  38#include <linux/swapops.h>
  39#include <linux/page_cgroup.h>
  40
  41static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  42				 unsigned char);
  43static void free_swap_count_continuations(struct swap_info_struct *);
  44static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  45
  46static DEFINE_SPINLOCK(swap_lock);
  47static unsigned int nr_swapfiles;
  48long nr_swap_pages;
  49long total_swap_pages;
  50static int least_priority;
  51
  52static const char Bad_file[] = "Bad swap file entry ";
  53static const char Unused_file[] = "Unused swap file entry ";
  54static const char Bad_offset[] = "Bad swap offset entry ";
  55static const char Unused_offset[] = "Unused swap offset entry ";
  56
  57static struct swap_list_t swap_list = {-1, -1};
  58
  59static struct swap_info_struct *swap_info[MAX_SWAPFILES];
  60
  61static DEFINE_MUTEX(swapon_mutex);
  62
  63static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  64/* Activity counter to indicate that a swapon or swapoff has occurred */
  65static atomic_t proc_poll_event = ATOMIC_INIT(0);
  66
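/*
 * Each swap_map byte holds a usage count in its low bits, with
 * SWAP_HAS_CACHE set while the slot is also held by the swap cache;
 * swap_count() strips the cache bit (the result may still include the
 * COUNT_CONTINUED flag).
 */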
  67static inline unsigned char swap_count(unsigned char ent)
  68{
  69	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
  70}
  71
  72/* returns 1 if swap entry is freed */
  73static int
  74__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
  75{
  76	swp_entry_t entry = swp_entry(si->type, offset);
  77	struct page *page;
  78	int ret = 0;
  79
  80	page = find_get_page(&swapper_space, entry.val);
  81	if (!page)
  82		return 0;
  83	/*
  84	 * This function is called from scan_swap_map(), which is in turn
  85	 * called from vmscan.c while reclaiming pages, so the caller may
  86	 * already hold a page lock here.  We have to use trylock to avoid
  87	 * deadlock.  This is a special case; in normal operation use
  88	 * try_to_free_swap() with an explicit lock_page().
  89	 */
  90	if (trylock_page(page)) {
  91		ret = try_to_free_swap(page);
  92		unlock_page(page);
  93	}
  94	page_cache_release(page);
  95	return ret;
  96}
  97
  98/*
  99 * swapon tells the device that all the old swap contents can be discarded,
 100 * to allow the swap device to optimize its wear-levelling.
 101 */
 102static int discard_swap(struct swap_info_struct *si)
 103{
 104	struct swap_extent *se;
 105	sector_t start_block;
 106	sector_t nr_blocks;
 107	int err = 0;
 108
 109	/* Do not discard the swap header page! */
 110	se = &si->first_swap_extent;
 111	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 112	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 113	if (nr_blocks) {
 114		err = blkdev_issue_discard(si->bdev, start_block,
 115				nr_blocks, GFP_KERNEL, 0);
 116		if (err)
 117			return err;
 118		cond_resched();
 119	}
 120
 121	list_for_each_entry(se, &si->first_swap_extent.list, list) {
 122		start_block = se->start_block << (PAGE_SHIFT - 9);
 123		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 124
 125		err = blkdev_issue_discard(si->bdev, start_block,
 126				nr_blocks, GFP_KERNEL, 0);
 127		if (err)
 128			break;
 129
 130		cond_resched();
 131	}
 132	return err;		/* That will often be -EOPNOTSUPP */
 133}
 134
 135/*
 136 * swap allocation tells the device that a cluster of swap can now be discarded,
 137 * to allow the swap device to optimize its wear-levelling.
 138 */
 139static void discard_swap_cluster(struct swap_info_struct *si,
 140				 pgoff_t start_page, pgoff_t nr_pages)
 141{
 142	struct swap_extent *se = si->curr_swap_extent;
 143	int found_extent = 0;
 144
 145	while (nr_pages) {
 146		struct list_head *lh;
 147
 148		if (se->start_page <= start_page &&
 149		    start_page < se->start_page + se->nr_pages) {
 150			pgoff_t offset = start_page - se->start_page;
 151			sector_t start_block = se->start_block + offset;
 152			sector_t nr_blocks = se->nr_pages - offset;
 153
 154			if (nr_blocks > nr_pages)
 155				nr_blocks = nr_pages;
 156			start_page += nr_blocks;
 157			nr_pages -= nr_blocks;
 158
 159			if (!found_extent++)
 160				si->curr_swap_extent = se;
 161
 162			start_block <<= PAGE_SHIFT - 9;
 163			nr_blocks <<= PAGE_SHIFT - 9;
 164			if (blkdev_issue_discard(si->bdev, start_block,
 165				    nr_blocks, GFP_NOIO, 0))
 166				break;
 167		}
 168
 169		lh = se->list.next;
 170		se = list_entry(lh, struct swap_extent, list);
 171	}
 172}
 173
 174static int wait_for_discard(void *word)
 175{
 176	schedule();
 177	return 0;
 178}
 179
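/*
 * A "cluster" is SWAPFILE_CLUSTER consecutive swap pages handed out
 * sequentially before a new cluster is sought; LATENCY_LIMIT bounds how
 * many swap_map entries scan_swap_map() examines between cond_resched()
 * calls.
 */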
 180#define SWAPFILE_CLUSTER	256
 181#define LATENCY_LIMIT		256
 182
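/*
 * scan_swap_map() finds a free slot in si->swap_map and claims it with
 * the given usage (1 for a page-table reference, SWAP_HAS_CACHE for the
 * swap cache).  Called and returns with swap_lock held; returns the
 * offset, or 0 when the area is full or not writable.
 */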
 183static unsigned long scan_swap_map(struct swap_info_struct *si,
 184				   unsigned char usage)
 185{
 186	unsigned long offset;
 187	unsigned long scan_base;
 188	unsigned long last_in_cluster = 0;
 189	int latency_ration = LATENCY_LIMIT;
 190	int found_free_cluster = 0;
 191
 192	/*
 193	 * We try to cluster swap pages by allocating them sequentially
 194	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 195	 * way, however, we resort to first-free allocation, starting
 196	 * a new cluster.  This prevents us from scattering swap pages
 197	 * all over the entire swap partition, so that we reduce
 198	 * overall disk seek times between swap pages.  -- sct
 199	 * But we do now try to find an empty cluster.  -Andrea
 200	 * And we let swap pages go all over an SSD partition.  Hugh
 201	 */
 202
 203	si->flags += SWP_SCANNING;
 204	scan_base = offset = si->cluster_next;
 205
 206	if (unlikely(!si->cluster_nr--)) {
 207		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 208			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 209			goto checks;
 210		}
 211		if (si->flags & SWP_DISCARDABLE) {
 212			/*
 213			 * Start range check on racing allocations, in case
 214			 * they overlap the cluster we eventually decide on
 215			 * (we scan without swap_lock to allow preemption).
 216			 * It's hardly conceivable that cluster_nr could be
 217			 * wrapped during our scan, but don't depend on it.
 218			 */
 219			if (si->lowest_alloc)
 220				goto checks;
 221			si->lowest_alloc = si->max;
 222			si->highest_alloc = 0;
 223		}
 224		spin_unlock(&swap_lock);
 225
 226		/*
 227		 * If seek is expensive, start searching for new cluster from
 228		 * start of partition, to minimize the span of allocated swap.
 229		 * But if seek is cheap, search from our current position, so
 230		 * that swap is allocated from all over the partition: if the
 231		 * Flash Translation Layer only remaps within limited zones,
 232		 * we don't want to wear out the first zone too quickly.
 233		 */
 234		if (!(si->flags & SWP_SOLIDSTATE))
 235			scan_base = offset = si->lowest_bit;
 236		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 237
 238		/* Locate the first empty (unaligned) cluster */
 239		for (; last_in_cluster <= si->highest_bit; offset++) {
 240			if (si->swap_map[offset])
 241				last_in_cluster = offset + SWAPFILE_CLUSTER;
 242			else if (offset == last_in_cluster) {
 243				spin_lock(&swap_lock);
 244				offset -= SWAPFILE_CLUSTER - 1;
 245				si->cluster_next = offset;
 246				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 247				found_free_cluster = 1;
 248				goto checks;
 249			}
 250			if (unlikely(--latency_ration < 0)) {
 251				cond_resched();
 252				latency_ration = LATENCY_LIMIT;
 253			}
 254		}
 255
 256		offset = si->lowest_bit;
 257		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 258
 259		/* Locate the first empty (unaligned) cluster */
 260		for (; last_in_cluster < scan_base; offset++) {
 261			if (si->swap_map[offset])
 262				last_in_cluster = offset + SWAPFILE_CLUSTER;
 263			else if (offset == last_in_cluster) {
 264				spin_lock(&swap_lock);
 265				offset -= SWAPFILE_CLUSTER - 1;
 266				si->cluster_next = offset;
 267				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 268				found_free_cluster = 1;
 269				goto checks;
 270			}
 271			if (unlikely(--latency_ration < 0)) {
 272				cond_resched();
 273				latency_ration = LATENCY_LIMIT;
 274			}
 275		}
 276
 277		offset = scan_base;
 278		spin_lock(&swap_lock);
 279		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 280		si->lowest_alloc = 0;
 281	}
 282
 283checks:
 284	if (!(si->flags & SWP_WRITEOK))
 285		goto no_page;
 286	if (!si->highest_bit)
 287		goto no_page;
 288	if (offset > si->highest_bit)
 289		scan_base = offset = si->lowest_bit;
 290
 291	/* reuse swap entry of cache-only swap if not busy. */
 292	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 293		int swap_was_freed;
 294		spin_unlock(&swap_lock);
 295		swap_was_freed = __try_to_reclaim_swap(si, offset);
 296		spin_lock(&swap_lock);
 297		/* entry was freed successfully, try to use this again */
 298		if (swap_was_freed)
 299			goto checks;
 300		goto scan; /* check next one */
 301	}
 302
 303	if (si->swap_map[offset])
 304		goto scan;
 305
 306	if (offset == si->lowest_bit)
 307		si->lowest_bit++;
 308	if (offset == si->highest_bit)
 309		si->highest_bit--;
 310	si->inuse_pages++;
 311	if (si->inuse_pages == si->pages) {
 312		si->lowest_bit = si->max;
 313		si->highest_bit = 0;
 314	}
 315	si->swap_map[offset] = usage;
 316	si->cluster_next = offset + 1;
 317	si->flags -= SWP_SCANNING;
 318
 319	if (si->lowest_alloc) {
 320		/*
 321		 * Only set when SWP_DISCARDABLE, and there's a scan
 322		 * for a free cluster in progress or just completed.
 323		 */
 324		if (found_free_cluster) {
 325			/*
 326			 * To optimize wear-levelling, discard the
 327			 * old data of the cluster, taking care not to
 328			 * discard any of its pages that have already
 329			 * been allocated by racing tasks (offset has
 330			 * already stepped over any at the beginning).
 331			 */
 332			if (offset < si->highest_alloc &&
 333			    si->lowest_alloc <= last_in_cluster)
 334				last_in_cluster = si->lowest_alloc - 1;
 335			si->flags |= SWP_DISCARDING;
 336			spin_unlock(&swap_lock);
 337
 338			if (offset < last_in_cluster)
 339				discard_swap_cluster(si, offset,
 340					last_in_cluster - offset + 1);
 341
 342			spin_lock(&swap_lock);
 343			si->lowest_alloc = 0;
 344			si->flags &= ~SWP_DISCARDING;
 345
 346			smp_mb();	/* wake_up_bit advises this */
 347			wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
 348
 349		} else if (si->flags & SWP_DISCARDING) {
 350			/*
 351			 * Delay using pages allocated by racing tasks
 352			 * until the whole discard has been issued. We
 353			 * could defer that delay until swap_writepage,
 354			 * but it's easier to keep this self-contained.
 355			 */
 356			spin_unlock(&swap_lock);
 357			wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
 358				wait_for_discard, TASK_UNINTERRUPTIBLE);
 359			spin_lock(&swap_lock);
 360		} else {
 361			/*
 362			 * Note pages allocated by racing tasks while
 363			 * scan for a free cluster is in progress, so
 364			 * that its final discard can exclude them.
 365			 */
 366			if (offset < si->lowest_alloc)
 367				si->lowest_alloc = offset;
 368			if (offset > si->highest_alloc)
 369				si->highest_alloc = offset;
 370		}
 371	}
 372	return offset;
 373
 374scan:
 375	spin_unlock(&swap_lock);
 376	while (++offset <= si->highest_bit) {
 377		if (!si->swap_map[offset]) {
 378			spin_lock(&swap_lock);
 379			goto checks;
 380		}
 381		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 382			spin_lock(&swap_lock);
 383			goto checks;
 384		}
 385		if (unlikely(--latency_ration < 0)) {
 386			cond_resched();
 387			latency_ration = LATENCY_LIMIT;
 388		}
 389	}
 390	offset = si->lowest_bit;
 391	while (++offset < scan_base) {
 392		if (!si->swap_map[offset]) {
 393			spin_lock(&swap_lock);
 394			goto checks;
 395		}
 396		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 397			spin_lock(&swap_lock);
 398			goto checks;
 399		}
 400		if (unlikely(--latency_ration < 0)) {
 401			cond_resched();
 402			latency_ration = LATENCY_LIMIT;
 403		}
 404	}
 405	spin_lock(&swap_lock);
 406
 407no_page:
 408	si->flags -= SWP_SCANNING;
 409	return 0;
 410}
 411
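/*
 * Allocate one swap slot (marked SWAP_HAS_CACHE) from the highest-priority
 * swap area with free space, rotating among areas of equal priority;
 * returns (swp_entry_t){0} when no swap is available.
 */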
 412swp_entry_t get_swap_page(void)
 413{
 414	struct swap_info_struct *si;
 415	pgoff_t offset;
 416	int type, next;
 417	int wrapped = 0;
 418
 419	spin_lock(&swap_lock);
 420	if (nr_swap_pages <= 0)
 421		goto noswap;
 422	nr_swap_pages--;
 423
 424	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
 425		si = swap_info[type];
 426		next = si->next;
 427		if (next < 0 ||
 428		    (!wrapped && si->prio != swap_info[next]->prio)) {
 429			next = swap_list.head;
 430			wrapped++;
 431		}
 432
 433		if (!si->highest_bit)
 434			continue;
 435		if (!(si->flags & SWP_WRITEOK))
 436			continue;
 437
 438		swap_list.next = next;
 439		/* This is called for allocating swap entry for cache */
 440		offset = scan_swap_map(si, SWAP_HAS_CACHE);
 441		if (offset) {
 442			spin_unlock(&swap_lock);
 443			return swp_entry(type, offset);
 444		}
 445		next = swap_list.next;
 446	}
 447
 448	nr_swap_pages++;
 449noswap:
 450	spin_unlock(&swap_lock);
 451	return (swp_entry_t) {0};
 452}
 453
 454/* The only caller of this function is now the suspend routine */
 455swp_entry_t get_swap_page_of_type(int type)
 456{
 457	struct swap_info_struct *si;
 458	pgoff_t offset;
 459
 460	spin_lock(&swap_lock);
 461	si = swap_info[type];
 462	if (si && (si->flags & SWP_WRITEOK)) {
 463		nr_swap_pages--;
 464		/* This is called for allocating swap entry, not cache */
 465		offset = scan_swap_map(si, 1);
 466		if (offset) {
 467			spin_unlock(&swap_lock);
 468			return swp_entry(type, offset);
 469		}
 470		nr_swap_pages++;
 471	}
 472	spin_unlock(&swap_lock);
 473	return (swp_entry_t) {0};
 474}
 475
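/*
 * Translate a swap entry into its swap_info_struct, validating the type,
 * offset and use count; returns it with swap_lock held, or NULL after
 * logging one of the Bad/Unused messages defined above.
 */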
 476static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 477{
 478	struct swap_info_struct *p;
 479	unsigned long offset, type;
 480
 481	if (!entry.val)
 482		goto out;
 483	type = swp_type(entry);
 484	if (type >= nr_swapfiles)
 485		goto bad_nofile;
 486	p = swap_info[type];
 487	if (!(p->flags & SWP_USED))
 488		goto bad_device;
 489	offset = swp_offset(entry);
 490	if (offset >= p->max)
 491		goto bad_offset;
 492	if (!p->swap_map[offset])
 493		goto bad_free;
 494	spin_lock(&swap_lock);
 495	return p;
 496
 497bad_free:
 498	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
 499	goto out;
 500bad_offset:
 501	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
 502	goto out;
 503bad_device:
 504	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
 505	goto out;
 506bad_nofile:
 507	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
 508out:
 509	return NULL;
 510}
 511
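/*
 * Drop one reference of kind @usage (a map count, SWAP_MAP_SHMEM or
 * SWAP_HAS_CACHE) from @entry.  Returns the resulting swap_map value;
 * when it reaches zero the slot becomes free again and the block
 * device's swap_slot_free_notify hook, if any, is called.
 */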
 512static unsigned char swap_entry_free(struct swap_info_struct *p,
 513				     swp_entry_t entry, unsigned char usage)
 514{
 515	unsigned long offset = swp_offset(entry);
 516	unsigned char count;
 517	unsigned char has_cache;
 518
 519	count = p->swap_map[offset];
 520	has_cache = count & SWAP_HAS_CACHE;
 521	count &= ~SWAP_HAS_CACHE;
 522
 523	if (usage == SWAP_HAS_CACHE) {
 524		VM_BUG_ON(!has_cache);
 525		has_cache = 0;
 526	} else if (count == SWAP_MAP_SHMEM) {
 527		/*
 528		 * Or we could insist on shmem.c using a special
 529		 * swap_shmem_free() and free_shmem_swap_and_cache()...
 530		 */
 531		count = 0;
 532	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 533		if (count == COUNT_CONTINUED) {
 534			if (swap_count_continued(p, offset, count))
 535				count = SWAP_MAP_MAX | COUNT_CONTINUED;
 536			else
 537				count = SWAP_MAP_MAX;
 538		} else
 539			count--;
 540	}
 541
 542	if (!count)
 543		mem_cgroup_uncharge_swap(entry);
 544
 545	usage = count | has_cache;
 546	p->swap_map[offset] = usage;
 547
 548	/* free if no reference */
 549	if (!usage) {
 550		struct gendisk *disk = p->bdev->bd_disk;
 551		if (offset < p->lowest_bit)
 552			p->lowest_bit = offset;
 553		if (offset > p->highest_bit)
 554			p->highest_bit = offset;
 555		if (swap_list.next >= 0 &&
 556		    p->prio > swap_info[swap_list.next]->prio)
 557			swap_list.next = p->type;
 558		nr_swap_pages++;
 559		p->inuse_pages--;
 560		if ((p->flags & SWP_BLKDEV) &&
 561				disk->fops->swap_slot_free_notify)
 562			disk->fops->swap_slot_free_notify(p->bdev, offset);
 563	}
 564
 565	return usage;
 566}
 567
 568/*
 569 * Caller has made sure that the swapdevice corresponding to entry
 570 * is still around or has not been recycled.
 571 */
 572void swap_free(swp_entry_t entry)
 573{
 574	struct swap_info_struct *p;
 575
 576	p = swap_info_get(entry);
 577	if (p) {
 578		swap_entry_free(p, entry, 1);
 579		spin_unlock(&swap_lock);
 580	}
 581}
 582
 583/*
 584 * Called after dropping swapcache to decrease refcnt to swap entries.
 585 */
 586void swapcache_free(swp_entry_t entry, struct page *page)
 587{
 588	struct swap_info_struct *p;
 589	unsigned char count;
 590
 591	p = swap_info_get(entry);
 592	if (p) {
 593		count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
 594		if (page)
 595			mem_cgroup_uncharge_swapcache(page, entry, count != 0);
 596		spin_unlock(&swap_lock);
 597	}
 598}
 599
 600/*
 601 * How many references to page are currently swapped out?
 602 * This does not give an exact answer when swap count is continued,
 603 * but does include the high COUNT_CONTINUED flag to allow for that.
 604 */
 605static inline int page_swapcount(struct page *page)
 606{
 607	int count = 0;
 608	struct swap_info_struct *p;
 609	swp_entry_t entry;
 610
 611	entry.val = page_private(page);
 612	p = swap_info_get(entry);
 613	if (p) {
 614		count = swap_count(p->swap_map[swp_offset(entry)]);
 615		spin_unlock(&swap_lock);
 616	}
 617	return count;
 618}
 619
 620/*
 621 * We can write to an anon page without COW if there are no other references
 622 * to it.  And as a side-effect, free up its swap: because the old content
 623 * on disk will never be read, and seeking back there to write new content
 624 * later would only waste time away from clustering.
 625 */
 626int reuse_swap_page(struct page *page)
 627{
 628	int count;
 629
 630	VM_BUG_ON(!PageLocked(page));
 631	if (unlikely(PageKsm(page)))
 632		return 0;
 633	count = page_mapcount(page);
 634	if (count <= 1 && PageSwapCache(page)) {
 635		count += page_swapcount(page);
 636		if (count == 1 && !PageWriteback(page)) {
 637			delete_from_swap_cache(page);
 638			SetPageDirty(page);
 639		}
 640	}
 641	return count <= 1;
 642}
 643
 644/*
 645 * If swap is getting full, or if there are no more mappings of this page,
 646 * then try_to_free_swap is called to free its swap space.
 647 */
 648int try_to_free_swap(struct page *page)
 649{
 650	VM_BUG_ON(!PageLocked(page));
 651
 652	if (!PageSwapCache(page))
 653		return 0;
 654	if (PageWriteback(page))
 655		return 0;
 656	if (page_swapcount(page))
 657		return 0;
 658
 659	/*
 660	 * Once hibernation has begun to create its image of memory,
 661	 * there's a danger that one of the calls to try_to_free_swap()
 662	 * - most probably a call from __try_to_reclaim_swap() while
 663	 * hibernation is allocating its own swap pages for the image,
 664	 * but conceivably even a call from memory reclaim - will free
 665	 * the swap from a page which has already been recorded in the
 666	 * image as a clean swapcache page, and then reuse its swap for
 667	 * another page of the image.  On waking from hibernation, the
 668	 * original page might be freed under memory pressure, then
 669	 * later read back in from swap, now with the wrong data.
 670	 *
 671	 * Hibernation clears bits from gfp_allowed_mask to prevent
 672	 * memory reclaim from writing to disk, so check that here.
 673	 */
 674	if (!(gfp_allowed_mask & __GFP_IO))
 675		return 0;
 676
 677	delete_from_swap_cache(page);
 678	SetPageDirty(page);
 679	return 1;
 680}
 681
 682/*
 683 * Free the swap entry like above, but also try to
 684 * free the page cache entry if it is the last user.
 685 */
 686int free_swap_and_cache(swp_entry_t entry)
 687{
 688	struct swap_info_struct *p;
 689	struct page *page = NULL;
 690
 691	if (non_swap_entry(entry))
 692		return 1;
 693
 694	p = swap_info_get(entry);
 695	if (p) {
 696		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
 697			page = find_get_page(&swapper_space, entry.val);
 698			if (page && !trylock_page(page)) {
 699				page_cache_release(page);
 700				page = NULL;
 701			}
 702		}
 703		spin_unlock(&swap_lock);
 704	}
 705	if (page) {
 706		/*
 707		 * Not mapped elsewhere, or swap space full? Free it!
 708		 * Also recheck PageSwapCache now page is locked (above).
 709		 */
 710		if (PageSwapCache(page) && !PageWriteback(page) &&
 711				(!page_mapped(page) || vm_swap_full())) {
 712			delete_from_swap_cache(page);
 713			SetPageDirty(page);
 714		}
 715		unlock_page(page);
 716		page_cache_release(page);
 717	}
 718	return p != NULL;
 719}
 720
 721#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 722/**
 723 * mem_cgroup_count_swap_user - count the user of a swap entry
 724 * @ent: the swap entry to be checked
 725 * @pagep: the pointer for the swap cache page of the entry to be stored
 726 *
 727 * Returns the number of users of the swap entry. The count is valid only
 728 * for swap entries of anonymous pages.
 729 * If the entry is found in the swap cache, the page is stored in *pagep
 730 * with its refcount incremented.
 731 */
 732int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 733{
 734	struct page *page;
 735	struct swap_info_struct *p;
 736	int count = 0;
 737
 738	page = find_get_page(&swapper_space, ent.val);
 739	if (page)
 740		count += page_mapcount(page);
 741	p = swap_info_get(ent);
 742	if (p) {
 743		count += swap_count(p->swap_map[swp_offset(ent)]);
 744		spin_unlock(&swap_lock);
 745	}
 746
 747	*pagep = page;
 748	return count;
 749}
 750#endif
 751
 752#ifdef CONFIG_HIBERNATION
 753/*
 754 * Find the swap type that corresponds to given device (if any).
 755 *
 756 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 757 * from 0, in which the swap header is expected to be located.
 758 *
 759 * This is needed for the suspend to disk (aka swsusp).
 760 */
 761int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 762{
 763	struct block_device *bdev = NULL;
 764	int type;
 765
 766	if (device)
 767		bdev = bdget(device);
 768
 769	spin_lock(&swap_lock);
 770	for (type = 0; type < nr_swapfiles; type++) {
 771		struct swap_info_struct *sis = swap_info[type];
 772
 773		if (!(sis->flags & SWP_WRITEOK))
 774			continue;
 775
 776		if (!bdev) {
 777			if (bdev_p)
 778				*bdev_p = bdgrab(sis->bdev);
 779
 780			spin_unlock(&swap_lock);
 781			return type;
 782		}
 783		if (bdev == sis->bdev) {
 784			struct swap_extent *se = &sis->first_swap_extent;
 785
 786			if (se->start_block == offset) {
 787				if (bdev_p)
 788					*bdev_p = bdgrab(sis->bdev);
 789
 790				spin_unlock(&swap_lock);
 791				bdput(bdev);
 792				return type;
 793			}
 794		}
 795	}
 796	spin_unlock(&swap_lock);
 797	if (bdev)
 798		bdput(bdev);
 799
 800	return -ENODEV;
 801}
 802
 803/*
 804 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 805 * corresponding to given index in swap_info (swap type).
 806 */
 807sector_t swapdev_block(int type, pgoff_t offset)
 808{
 809	struct block_device *bdev;
 810
 811	if ((unsigned int)type >= nr_swapfiles)
 812		return 0;
 813	if (!(swap_info[type]->flags & SWP_WRITEOK))
 814		return 0;
 815	return map_swap_entry(swp_entry(type, offset), &bdev);
 816}
 817
 818/*
 819 * Return either the total number of swap pages of given type, or the number
 820 * of free pages of that type (depending on @free)
 821 *
 822 * This is needed for software suspend
 823 */
 824unsigned int count_swap_pages(int type, int free)
 825{
 826	unsigned int n = 0;
 827
 828	spin_lock(&swap_lock);
 829	if ((unsigned int)type < nr_swapfiles) {
 830		struct swap_info_struct *sis = swap_info[type];
 831
 832		if (sis->flags & SWP_WRITEOK) {
 833			n = sis->pages;
 834			if (free)
 835				n -= sis->inuse_pages;
 836		}
 837	}
 838	spin_unlock(&swap_lock);
 839	return n;
 840}
 841#endif /* CONFIG_HIBERNATION */
 842
 843/*
 844 * No need to decide whether this PTE shares the swap entry with others,
 845 * just let do_wp_page work it out if a write is requested later - to
 846 * force COW, vm_page_prot omits write permission from any private vma.
 847 */
 848static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 849		unsigned long addr, swp_entry_t entry, struct page *page)
 850{
 851	struct mem_cgroup *ptr;
 852	spinlock_t *ptl;
 853	pte_t *pte;
 854	int ret = 1;
 855
 856	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
 857		ret = -ENOMEM;
 858		goto out_nolock;
 859	}
 860
 861	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 862	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
 863		if (ret > 0)
 864			mem_cgroup_cancel_charge_swapin(ptr);
 865		ret = 0;
 866		goto out;
 867	}
 868
 869	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 870	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 871	get_page(page);
 872	set_pte_at(vma->vm_mm, addr, pte,
 873		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
 874	page_add_anon_rmap(page, vma, addr);
 875	mem_cgroup_commit_charge_swapin(page, ptr);
 876	swap_free(entry);
 877	/*
 878	 * Move the page to the active list so it is not
 879	 * immediately swapped out again after swapon.
 880	 */
 881	activate_page(page);
 882out:
 883	pte_unmap_unlock(pte, ptl);
 884out_nolock:
 885	return ret;
 886}
 887
 888static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 889				unsigned long addr, unsigned long end,
 890				swp_entry_t entry, struct page *page)
 891{
 892	pte_t swp_pte = swp_entry_to_pte(entry);
 893	pte_t *pte;
 894	int ret = 0;
 895
 896	/*
 897	 * We don't actually need pte lock while scanning for swp_pte: since
 898	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
 899	 * page table while we're scanning; though it could get zapped, and on
 900	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
 901	 * of unmatched parts which look like swp_pte, so unuse_pte must
 902	 * recheck under pte lock.  Scanning without pte lock lets it be
 903	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
 904	 */
 905	pte = pte_offset_map(pmd, addr);
 906	do {
 907		/*
 908		 * swapoff spends a _lot_ of time in this loop!
 909		 * Test inline before going to call unuse_pte.
 910		 */
 911		if (unlikely(pte_same(*pte, swp_pte))) {
 912			pte_unmap(pte);
 913			ret = unuse_pte(vma, pmd, addr, entry, page);
 914			if (ret)
 915				goto out;
 916			pte = pte_offset_map(pmd, addr);
 917		}
 918	} while (pte++, addr += PAGE_SIZE, addr != end);
 919	pte_unmap(pte - 1);
 920out:
 921	return ret;
 922}
 923
 924static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 925				unsigned long addr, unsigned long end,
 926				swp_entry_t entry, struct page *page)
 927{
 928	pmd_t *pmd;
 929	unsigned long next;
 930	int ret;
 931
 932	pmd = pmd_offset(pud, addr);
 933	do {
 934		next = pmd_addr_end(addr, end);
 935		if (unlikely(pmd_trans_huge(*pmd)))
 936			continue;
 937		if (pmd_none_or_clear_bad(pmd))
 938			continue;
 939		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
 940		if (ret)
 941			return ret;
 942	} while (pmd++, addr = next, addr != end);
 943	return 0;
 944}
 945
 946static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 947				unsigned long addr, unsigned long end,
 948				swp_entry_t entry, struct page *page)
 949{
 950	pud_t *pud;
 951	unsigned long next;
 952	int ret;
 953
 954	pud = pud_offset(pgd, addr);
 955	do {
 956		next = pud_addr_end(addr, end);
 957		if (pud_none_or_clear_bad(pud))
 958			continue;
 959		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
 960		if (ret)
 961			return ret;
 962	} while (pud++, addr = next, addr != end);
 963	return 0;
 964}
 965
 966static int unuse_vma(struct vm_area_struct *vma,
 967				swp_entry_t entry, struct page *page)
 968{
 969	pgd_t *pgd;
 970	unsigned long addr, end, next;
 971	int ret;
 972
 973	if (page_anon_vma(page)) {
 974		addr = page_address_in_vma(page, vma);
 975		if (addr == -EFAULT)
 976			return 0;
 977		else
 978			end = addr + PAGE_SIZE;
 979	} else {
 980		addr = vma->vm_start;
 981		end = vma->vm_end;
 982	}
 983
 984	pgd = pgd_offset(vma->vm_mm, addr);
 985	do {
 986		next = pgd_addr_end(addr, end);
 987		if (pgd_none_or_clear_bad(pgd))
 988			continue;
 989		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
 990		if (ret)
 991			return ret;
 992	} while (pgd++, addr = next, addr != end);
 993	return 0;
 994}
 995
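/*
 * Scan every vma with an anon_vma in @mm for ptes holding @entry and
 * replace them with @page.  The page lock is dropped and retaken if
 * mmap_sem cannot be acquired without blocking.
 */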
 996static int unuse_mm(struct mm_struct *mm,
 997				swp_entry_t entry, struct page *page)
 998{
 999	struct vm_area_struct *vma;
1000	int ret = 0;
1001
1002	if (!down_read_trylock(&mm->mmap_sem)) {
1003		/*
1004		 * Activate page so shrink_inactive_list is unlikely to unmap
1005		 * its ptes while lock is dropped, so swapoff can make progress.
1006		 */
1007		activate_page(page);
1008		unlock_page(page);
1009		down_read(&mm->mmap_sem);
1010		lock_page(page);
1011	}
1012	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1013		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1014			break;
1015	}
1016	up_read(&mm->mmap_sem);
1017	return (ret < 0)? ret: 0;
1018}
1019
1020/*
1021 * Scan swap_map from current position to next entry still in use.
1022 * Recycle to start on reaching the end, returning 0 when empty.
1023 */
1024static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1025					unsigned int prev)
1026{
1027	unsigned int max = si->max;
1028	unsigned int i = prev;
1029	unsigned char count;
1030
1031	/*
1032	 * No need for swap_lock here: we're just looking
1033	 * for whether an entry is in use, not modifying it; false
1034	 * hits are okay, and sys_swapoff() has already prevented new
1035	 * allocations from this area (while holding swap_lock).
1036	 */
1037	for (;;) {
1038		if (++i >= max) {
1039			if (!prev) {
1040				i = 0;
1041				break;
1042			}
1043			/*
1044			 * No entries in use at top of swap_map,
1045			 * loop back to start and recheck there.
1046			 */
1047			max = prev + 1;
1048			prev = 0;
1049			i = 1;
1050		}
1051		count = si->swap_map[i];
1052		if (count && swap_count(count) != SWAP_MAP_BAD)
1053			break;
1054	}
1055	return i;
1056}
1057
1058/*
1059 * We completely avoid races by reading each swap page in advance,
1060 * and then search for the process using it.  All the necessary
1061 * page table adjustments can then be made atomically.
1062 */
1063static int try_to_unuse(unsigned int type)
1064{
1065	struct swap_info_struct *si = swap_info[type];
1066	struct mm_struct *start_mm;
1067	unsigned char *swap_map;
1068	unsigned char swcount;
1069	struct page *page;
1070	swp_entry_t entry;
1071	unsigned int i = 0;
1072	int retval = 0;
1073
1074	/*
1075	 * When searching mms for an entry, a good strategy is to
1076	 * start at the first mm we freed the previous entry from
1077	 * (though actually we don't notice whether we or coincidence
1078	 * freed the entry).  Initialize this start_mm with a hold.
1079	 *
1080	 * A simpler strategy would be to start at the last mm we
1081	 * freed the previous entry from; but that would take less
1082	 * advantage of mmlist ordering, which clusters forked mms
1083	 * together, child after parent.  If we race with dup_mmap(), we
1084	 * prefer to resolve parent before child, lest we miss entries
1085	 * duplicated after we scanned child: using last mm would invert
1086	 * that.
1087	 */
1088	start_mm = &init_mm;
1089	atomic_inc(&init_mm.mm_users);
1090
1091	/*
1092	 * Keep on scanning until all entries have gone.  Usually,
1093	 * one pass through swap_map is enough, but not necessarily:
1094	 * there are races when an instance of an entry might be missed.
1095	 */
1096	while ((i = find_next_to_unuse(si, i)) != 0) {
1097		if (signal_pending(current)) {
1098			retval = -EINTR;
1099			break;
1100		}
1101
1102		/*
1103		 * Get a page for the entry, using the existing swap
1104		 * cache page if there is one.  Otherwise, get a clean
1105		 * page and read the swap into it.
1106		 */
1107		swap_map = &si->swap_map[i];
1108		entry = swp_entry(type, i);
1109		page = read_swap_cache_async(entry,
1110					GFP_HIGHUSER_MOVABLE, NULL, 0);
1111		if (!page) {
1112			/*
1113			 * Either swap_duplicate() failed because entry
1114			 * has been freed independently, and will not be
1115			 * reused since sys_swapoff() already disabled
1116			 * allocation from here, or alloc_page() failed.
1117			 */
1118			if (!*swap_map)
1119				continue;
1120			retval = -ENOMEM;
1121			break;
1122		}
1123
1124		/*
1125		 * Don't hold on to start_mm if it looks like exiting.
1126		 */
1127		if (atomic_read(&start_mm->mm_users) == 1) {
1128			mmput(start_mm);
1129			start_mm = &init_mm;
1130			atomic_inc(&init_mm.mm_users);
1131		}
1132
1133		/*
1134		 * Wait for and lock page.  When do_swap_page races with
1135		 * try_to_unuse, do_swap_page can handle the fault much
1136		 * faster than try_to_unuse can locate the entry.  This
1137		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1138		 * defer to do_swap_page in such a case - in some tests,
1139		 * do_swap_page and try_to_unuse repeatedly compete.
1140		 */
1141		wait_on_page_locked(page);
1142		wait_on_page_writeback(page);
1143		lock_page(page);
1144		wait_on_page_writeback(page);
1145
1146		/*
1147		 * Remove all references to entry.
1148		 */
1149		swcount = *swap_map;
1150		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1151			retval = shmem_unuse(entry, page);
1152			/* page has already been unlocked and released */
1153			if (retval < 0)
1154				break;
1155			continue;
1156		}
1157		if (swap_count(swcount) && start_mm != &init_mm)
1158			retval = unuse_mm(start_mm, entry, page);
1159
1160		if (swap_count(*swap_map)) {
1161			int set_start_mm = (*swap_map >= swcount);
1162			struct list_head *p = &start_mm->mmlist;
1163			struct mm_struct *new_start_mm = start_mm;
1164			struct mm_struct *prev_mm = start_mm;
1165			struct mm_struct *mm;
1166
1167			atomic_inc(&new_start_mm->mm_users);
1168			atomic_inc(&prev_mm->mm_users);
1169			spin_lock(&mmlist_lock);
1170			while (swap_count(*swap_map) && !retval &&
1171					(p = p->next) != &start_mm->mmlist) {
1172				mm = list_entry(p, struct mm_struct, mmlist);
1173				if (!atomic_inc_not_zero(&mm->mm_users))
1174					continue;
1175				spin_unlock(&mmlist_lock);
1176				mmput(prev_mm);
1177				prev_mm = mm;
1178
1179				cond_resched();
1180
1181				swcount = *swap_map;
1182				if (!swap_count(swcount)) /* any usage ? */
1183					;
1184				else if (mm == &init_mm)
1185					set_start_mm = 1;
1186				else
1187					retval = unuse_mm(mm, entry, page);
1188
1189				if (set_start_mm && *swap_map < swcount) {
1190					mmput(new_start_mm);
1191					atomic_inc(&mm->mm_users);
1192					new_start_mm = mm;
1193					set_start_mm = 0;
1194				}
1195				spin_lock(&mmlist_lock);
1196			}
1197			spin_unlock(&mmlist_lock);
1198			mmput(prev_mm);
1199			mmput(start_mm);
1200			start_mm = new_start_mm;
1201		}
1202		if (retval) {
1203			unlock_page(page);
1204			page_cache_release(page);
1205			break;
1206		}
1207
1208		/*
1209		 * If a reference remains (rare), we would like to leave
1210		 * the page in the swap cache; but try_to_unmap could
1211		 * then re-duplicate the entry once we drop page lock,
1212		 * so we might loop indefinitely; also, that page could
1213		 * not be swapped out to other storage meanwhile.  So:
1214		 * delete from cache even if there's another reference,
1215		 * after ensuring that the data has been saved to disk -
1216		 * since if the reference remains (rarer), it will be
1217		 * read from disk into another page.  Splitting into two
1218		 * pages would be incorrect if swap supported "shared
1219		 * private" pages, but they are handled by tmpfs files.
1220		 *
1221		 * Given how unuse_vma() targets one particular offset
1222		 * in an anon_vma, once the anon_vma has been determined,
1223		 * this splitting happens to be just what is needed to
1224		 * handle where KSM pages have been swapped out: re-reading
1225		 * is unnecessarily slow, but we can fix that later on.
1226		 */
1227		if (swap_count(*swap_map) &&
1228		     PageDirty(page) && PageSwapCache(page)) {
1229			struct writeback_control wbc = {
1230				.sync_mode = WB_SYNC_NONE,
1231			};
1232
1233			swap_writepage(page, &wbc);
1234			lock_page(page);
1235			wait_on_page_writeback(page);
1236		}
1237
1238		/*
1239		 * It is conceivable that a racing task removed this page from
1240		 * swap cache just before we acquired the page lock at the top,
1241		 * or while we dropped it in unuse_mm().  The page might even
1242		 * be back in swap cache on another swap area: that we must not
1243		 * delete, since it may not have been written out to swap yet.
1244		 */
1245		if (PageSwapCache(page) &&
1246		    likely(page_private(page) == entry.val))
1247			delete_from_swap_cache(page);
1248
1249		/*
1250		 * So we could skip searching mms once swap count went
1251		 * to 1, we did not mark any present ptes as dirty: must
1252		 * mark page dirty so shrink_page_list will preserve it.
1253		 */
1254		SetPageDirty(page);
1255		unlock_page(page);
1256		page_cache_release(page);
1257
1258		/*
1259		 * Make sure that we aren't completely killing
1260		 * interactive performance.
1261		 */
1262		cond_resched();
1263	}
1264
1265	mmput(start_mm);
1266	return retval;
1267}
1268
1269/*
1270 * After a successful try_to_unuse, if no swap is now in use, we know
1271 * we can empty the mmlist.  swap_lock must be held on entry and exit.
1272 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1273 * added to the mmlist just after page_duplicate - before would be racy.
1274 */
1275static void drain_mmlist(void)
1276{
1277	struct list_head *p, *next;
1278	unsigned int type;
1279
1280	for (type = 0; type < nr_swapfiles; type++)
1281		if (swap_info[type]->inuse_pages)
1282			return;
1283	spin_lock(&mmlist_lock);
1284	list_for_each_safe(p, next, &init_mm.mmlist)
1285		list_del_init(p);
1286	spin_unlock(&mmlist_lock);
1287}
1288
1289/*
1290 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1291 * corresponds to page offset for the specified swap entry.
1292 * Note that the type of this function is sector_t, but it returns page offset
1293 * into the bdev, not sector offset.
1294 */
1295static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1296{
1297	struct swap_info_struct *sis;
1298	struct swap_extent *start_se;
1299	struct swap_extent *se;
1300	pgoff_t offset;
1301
1302	sis = swap_info[swp_type(entry)];
1303	*bdev = sis->bdev;
1304
1305	offset = swp_offset(entry);
1306	start_se = sis->curr_swap_extent;
1307	se = start_se;
1308
1309	for ( ; ; ) {
1310		struct list_head *lh;
1311
1312		if (se->start_page <= offset &&
1313				offset < (se->start_page + se->nr_pages)) {
1314			return se->start_block + (offset - se->start_page);
1315		}
1316		lh = se->list.next;
1317		se = list_entry(lh, struct swap_extent, list);
1318		sis->curr_swap_extent = se;
1319		BUG_ON(se == start_se);		/* It *must* be present */
1320	}
1321}
1322
1323/*
1324 * Returns the page offset into bdev for the specified page's swap entry.
1325 */
1326sector_t map_swap_page(struct page *page, struct block_device **bdev)
1327{
1328	swp_entry_t entry;
1329	entry.val = page_private(page);
1330	return map_swap_entry(entry, bdev);
1331}
1332
1333/*
1334 * Free all of a swapdev's extent information
1335 */
1336static void destroy_swap_extents(struct swap_info_struct *sis)
1337{
1338	while (!list_empty(&sis->first_swap_extent.list)) {
1339		struct swap_extent *se;
1340
1341		se = list_entry(sis->first_swap_extent.list.next,
1342				struct swap_extent, list);
1343		list_del(&se->list);
1344		kfree(se);
1345	}
1346}
1347
1348/*
1349 * Add a block range (and the corresponding page range) into this swapdev's
1350 * extent list.  The extent list is kept sorted in page order.
1351 *
1352 * This function rather assumes that it is called in ascending page order.
1353 */
1354static int
1355add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1356		unsigned long nr_pages, sector_t start_block)
1357{
1358	struct swap_extent *se;
1359	struct swap_extent *new_se;
1360	struct list_head *lh;
1361
1362	if (start_page == 0) {
1363		se = &sis->first_swap_extent;
1364		sis->curr_swap_extent = se;
1365		se->start_page = 0;
1366		se->nr_pages = nr_pages;
1367		se->start_block = start_block;
1368		return 1;
1369	} else {
1370		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
1371		se = list_entry(lh, struct swap_extent, list);
1372		BUG_ON(se->start_page + se->nr_pages != start_page);
1373		if (se->start_block + se->nr_pages == start_block) {
1374			/* Merge it */
1375			se->nr_pages += nr_pages;
1376			return 0;
1377		}
1378	}
1379
1380	/*
1381	 * No merge.  Insert a new extent, preserving ordering.
1382	 */
1383	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1384	if (new_se == NULL)
1385		return -ENOMEM;
1386	new_se->start_page = start_page;
1387	new_se->nr_pages = nr_pages;
1388	new_se->start_block = start_block;
1389
1390	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1391	return 1;
1392}
1393
1394/*
1395 * A `swap extent' is a simple thing which maps a contiguous range of pages
1396 * onto a contiguous range of disk blocks.  An ordered list of swap extents
1397 * is built at swapon time and is then used at swap_writepage/swap_readpage
1398 * time for locating where on disk a page belongs.
1399 *
1400 * If the swapfile is an S_ISBLK block device, a single extent is installed.
1401 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1402 * swap files identically.
1403 *
1404 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1405 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1406 * swapfiles are handled *identically* after swapon time.
1407 *
1408 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1409 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1410 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1411 * requirements, they are simply tossed out - we will never use those blocks
1412 * for swapping.
1413 *
1414 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1415 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1416 * which will scribble on the fs.
1417 *
1418 * The amount of disk space which a single swap extent represents varies.
1419 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1420 * extents in the list.  To avoid much list walking, we cache the previous
1421 * search location in `curr_swap_extent', and start new searches from there.
1422 * This is extremely effective.  The average number of iterations in
1423 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1424 */
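/*
 * For example, an extent with start_page == 10, nr_pages == 20 and
 * start_block == 1000 maps swap page offsets 10..29 onto PAGE_SIZE-sized
 * disk blocks 1000..1019 (see map_swap_entry() above).
 */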
1425static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1426{
1427	struct inode *inode;
1428	unsigned blocks_per_page;
1429	unsigned long page_no;
1430	unsigned blkbits;
1431	sector_t probe_block;
1432	sector_t last_block;
1433	sector_t lowest_block = -1;
1434	sector_t highest_block = 0;
1435	int nr_extents = 0;
1436	int ret;
1437
1438	inode = sis->swap_file->f_mapping->host;
1439	if (S_ISBLK(inode->i_mode)) {
1440		ret = add_swap_extent(sis, 0, sis->max, 0);
1441		*span = sis->pages;
1442		goto out;
1443	}
1444
1445	blkbits = inode->i_blkbits;
1446	blocks_per_page = PAGE_SIZE >> blkbits;
1447
1448	/*
1449	 * Map all the blocks into the extent list.  This code doesn't try
1450	 * to be very smart.
1451	 */
1452	probe_block = 0;
1453	page_no = 0;
1454	last_block = i_size_read(inode) >> blkbits;
1455	while ((probe_block + blocks_per_page) <= last_block &&
1456			page_no < sis->max) {
1457		unsigned block_in_page;
1458		sector_t first_block;
1459
1460		first_block = bmap(inode, probe_block);
1461		if (first_block == 0)
1462			goto bad_bmap;
1463
1464		/*
1465		 * It must be PAGE_SIZE aligned on-disk
1466		 */
1467		if (first_block & (blocks_per_page - 1)) {
1468			probe_block++;
1469			goto reprobe;
1470		}
1471
1472		for (block_in_page = 1; block_in_page < blocks_per_page;
1473					block_in_page++) {
1474			sector_t block;
1475
1476			block = bmap(inode, probe_block + block_in_page);
1477			if (block == 0)
1478				goto bad_bmap;
1479			if (block != first_block + block_in_page) {
1480				/* Discontiguity */
1481				probe_block++;
1482				goto reprobe;
1483			}
1484		}
1485
1486		first_block >>= (PAGE_SHIFT - blkbits);
1487		if (page_no) {	/* exclude the header page */
1488			if (first_block < lowest_block)
1489				lowest_block = first_block;
1490			if (first_block > highest_block)
1491				highest_block = first_block;
1492		}
1493
1494		/*
1495		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
1496		 */
1497		ret = add_swap_extent(sis, page_no, 1, first_block);
1498		if (ret < 0)
1499			goto out;
1500		nr_extents += ret;
1501		page_no++;
1502		probe_block += blocks_per_page;
1503reprobe:
1504		continue;
1505	}
1506	ret = nr_extents;
1507	*span = 1 + highest_block - lowest_block;
1508	if (page_no == 0)
1509		page_no = 1;	/* force Empty message */
1510	sis->max = page_no;
1511	sis->pages = page_no - 1;
1512	sis->highest_bit = page_no - 1;
1513out:
1514	return ret;
1515bad_bmap:
1516	printk(KERN_ERR "swapon: swapfile has holes\n");
1517	ret = -EINVAL;
1518	goto out;
1519}
1520
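/*
 * Make a fully initialised swap area available for allocation: assign its
 * priority (the one requested, or the next default negative priority),
 * account its pages and link it into swap_list in priority order.  Also
 * used by swapoff's error path to re-insert an area.
 */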
1521static void enable_swap_info(struct swap_info_struct *p, int prio,
1522				unsigned char *swap_map)
1523{
1524	int i, prev;
1525
1526	spin_lock(&swap_lock);
1527	if (prio >= 0)
1528		p->prio = prio;
1529	else
1530		p->prio = --least_priority;
1531	p->swap_map = swap_map;
1532	p->flags |= SWP_WRITEOK;
1533	nr_swap_pages += p->pages;
1534	total_swap_pages += p->pages;
1535
1536	/* insert swap space into swap_list: */
1537	prev = -1;
1538	for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1539		if (p->prio >= swap_info[i]->prio)
1540			break;
1541		prev = i;
1542	}
1543	p->next = i;
1544	if (prev < 0)
1545		swap_list.head = swap_list.next = p->type;
1546	else
1547		swap_info[prev]->next = p->type;
1548	spin_unlock(&swap_lock);
1549}
1550
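/*
 * swapoff(2): unlink the named swap area from swap_list, stop further
 * allocations from it, pull every entry still in use back into memory via
 * try_to_unuse(), then free its swap_map and extents and release the
 * backing file or block device.
 */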
1551SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1552{
1553	struct swap_info_struct *p = NULL;
1554	unsigned char *swap_map;
1555	struct file *swap_file, *victim;
1556	struct address_space *mapping;
1557	struct inode *inode;
1558	char *pathname;
1559	int oom_score_adj;
1560	int i, type, prev;
1561	int err;
1562
1563	if (!capable(CAP_SYS_ADMIN))
1564		return -EPERM;
1565
1566	pathname = getname(specialfile);
1567	err = PTR_ERR(pathname);
1568	if (IS_ERR(pathname))
1569		goto out;
1570
1571	victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
1572	putname(pathname);
1573	err = PTR_ERR(victim);
1574	if (IS_ERR(victim))
1575		goto out;
1576
1577	mapping = victim->f_mapping;
1578	prev = -1;
1579	spin_lock(&swap_lock);
1580	for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1581		p = swap_info[type];
1582		if (p->flags & SWP_WRITEOK) {
1583			if (p->swap_file->f_mapping == mapping)
1584				break;
1585		}
1586		prev = type;
1587	}
1588	if (type < 0) {
1589		err = -EINVAL;
1590		spin_unlock(&swap_lock);
1591		goto out_dput;
1592	}
1593	if (!security_vm_enough_memory(p->pages))
1594		vm_unacct_memory(p->pages);
1595	else {
1596		err = -ENOMEM;
1597		spin_unlock(&swap_lock);
1598		goto out_dput;
1599	}
1600	if (prev < 0)
1601		swap_list.head = p->next;
1602	else
1603		swap_info[prev]->next = p->next;
1604	if (type == swap_list.next) {
1605		/* just pick something that's safe... */
1606		swap_list.next = swap_list.head;
1607	}
1608	if (p->prio < 0) {
1609		for (i = p->next; i >= 0; i = swap_info[i]->next)
1610			swap_info[i]->prio = p->prio--;
1611		least_priority++;
1612	}
1613	nr_swap_pages -= p->pages;
1614	total_swap_pages -= p->pages;
1615	p->flags &= ~SWP_WRITEOK;
1616	spin_unlock(&swap_lock);
1617
1618	oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1619	err = try_to_unuse(type);
1620	test_set_oom_score_adj(oom_score_adj);
1621
1622	if (err) {
1623		/*
1624		 * reading p->prio and p->swap_map outside the lock is
1625		 * safe here because only sys_swapon and sys_swapoff
1626		 * change them, and there can be no other sys_swapon or
1627		 * sys_swapoff for this swap_info_struct at this point.
1628		 */
1629		/* re-insert swap space back into swap_list */
1630		enable_swap_info(p, p->prio, p->swap_map);
1631		goto out_dput;
1632	}
1633
1634	destroy_swap_extents(p);
1635	if (p->flags & SWP_CONTINUED)
1636		free_swap_count_continuations(p);
1637
1638	mutex_lock(&swapon_mutex);
1639	spin_lock(&swap_lock);
1640	drain_mmlist();
1641
1642	/* wait for anyone still in scan_swap_map */
1643	p->highest_bit = 0;		/* cuts scans short */
1644	while (p->flags >= SWP_SCANNING) {
1645		spin_unlock(&swap_lock);
1646		schedule_timeout_uninterruptible(1);
1647		spin_lock(&swap_lock);
1648	}
1649
1650	swap_file = p->swap_file;
1651	p->swap_file = NULL;
1652	p->max = 0;
1653	swap_map = p->swap_map;
1654	p->swap_map = NULL;
1655	p->flags = 0;
1656	spin_unlock(&swap_lock);
1657	mutex_unlock(&swapon_mutex);
1658	vfree(swap_map);
 1659	/* Destroy swap account information */
1660	swap_cgroup_swapoff(type);
1661
1662	inode = mapping->host;
1663	if (S_ISBLK(inode->i_mode)) {
1664		struct block_device *bdev = I_BDEV(inode);
1665		set_blocksize(bdev, p->old_block_size);
1666		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1667	} else {
1668		mutex_lock(&inode->i_mutex);
1669		inode->i_flags &= ~S_SWAPFILE;
1670		mutex_unlock(&inode->i_mutex);
1671	}
1672	filp_close(swap_file, NULL);
1673	err = 0;
1674	atomic_inc(&proc_poll_event);
1675	wake_up_interruptible(&proc_poll_wait);
1676
1677out_dput:
1678	filp_close(victim, NULL);
1679out:
1680	return err;
1681}
1682
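/*
 * /proc/swaps: a seq_file listing of the active swap areas.  swaps_poll()
 * additionally reports POLLERR | POLLPRI once proc_poll_event has
 * advanced, so userspace can detect swapon/swapoff activity.
 */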
1683#ifdef CONFIG_PROC_FS
1684static unsigned swaps_poll(struct file *file, poll_table *wait)
1685{
1686	struct seq_file *seq = file->private_data;
1687
1688	poll_wait(file, &proc_poll_wait, wait);
1689
1690	if (seq->poll_event != atomic_read(&proc_poll_event)) {
1691		seq->poll_event = atomic_read(&proc_poll_event);
1692		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
1693	}
1694
1695	return POLLIN | POLLRDNORM;
1696}
1697
1698/* iterator */
1699static void *swap_start(struct seq_file *swap, loff_t *pos)
1700{
1701	struct swap_info_struct *si;
1702	int type;
1703	loff_t l = *pos;
1704
1705	mutex_lock(&swapon_mutex);
1706
1707	if (!l)
1708		return SEQ_START_TOKEN;
1709
1710	for (type = 0; type < nr_swapfiles; type++) {
1711		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
1712		si = swap_info[type];
1713		if (!(si->flags & SWP_USED) || !si->swap_map)
1714			continue;
1715		if (!--l)
1716			return si;
1717	}
1718
1719	return NULL;
1720}
1721
1722static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1723{
1724	struct swap_info_struct *si = v;
1725	int type;
1726
1727	if (v == SEQ_START_TOKEN)
1728		type = 0;
1729	else
1730		type = si->type + 1;
1731
1732	for (; type < nr_swapfiles; type++) {
1733		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
1734		si = swap_info[type];
1735		if (!(si->flags & SWP_USED) || !si->swap_map)
1736			continue;
1737		++*pos;
1738		return si;
1739	}
1740
1741	return NULL;
1742}
1743
1744static void swap_stop(struct seq_file *swap, void *v)
1745{
1746	mutex_unlock(&swapon_mutex);
1747}
1748
1749static int swap_show(struct seq_file *swap, void *v)
1750{
1751	struct swap_info_struct *si = v;
1752	struct file *file;
1753	int len;
1754
1755	if (si == SEQ_START_TOKEN) {
1756		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1757		return 0;
1758	}
1759
1760	file = si->swap_file;
1761	len = seq_path(swap, &file->f_path, " \t\n\\");
1762	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1763			len < 40 ? 40 - len : 1, " ",
1764			S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1765				"partition" : "file\t",
1766			si->pages << (PAGE_SHIFT - 10),
1767			si->inuse_pages << (PAGE_SHIFT - 10),
1768			si->prio);
1769	return 0;
1770}
1771
1772static const struct seq_operations swaps_op = {
1773	.start =	swap_start,
1774	.next =		swap_next,
1775	.stop =		swap_stop,
1776	.show =		swap_show
1777};
1778
1779static int swaps_open(struct inode *inode, struct file *file)
1780{
1781	struct seq_file *seq;
1782	int ret;
1783
1784	ret = seq_open(file, &swaps_op);
1785	if (ret)
1786		return ret;
1787
1788	seq = file->private_data;
1789	seq->poll_event = atomic_read(&proc_poll_event);
1790	return 0;
1791}
1792
1793static const struct file_operations proc_swaps_operations = {
1794	.open		= swaps_open,
1795	.read		= seq_read,
1796	.llseek		= seq_lseek,
1797	.release	= seq_release,
1798	.poll		= swaps_poll,
1799};
1800
1801static int __init procswaps_init(void)
1802{
1803	proc_create("swaps", 0, NULL, &proc_swaps_operations);
1804	return 0;
1805}
1806__initcall(procswaps_init);
1807#endif /* CONFIG_PROC_FS */
1808
1809#ifdef MAX_SWAPFILES_CHECK
1810static int __init max_swapfiles_check(void)
1811{
1812	MAX_SWAPFILES_CHECK();
1813	return 0;
1814}
1815late_initcall(max_swapfiles_check);
1816#endif
1817
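/*
 * Allocate a swap_info_struct and assign it the lowest unused slot in
 * swap_info[], growing nr_swapfiles if necessary (entries are never
 * freed); returns it marked SWP_USED, or an ERR_PTR on failure.
 */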
1818static struct swap_info_struct *alloc_swap_info(void)
1819{
1820	struct swap_info_struct *p;
1821	unsigned int type;
1822
1823	p = kzalloc(sizeof(*p), GFP_KERNEL);
1824	if (!p)
1825		return ERR_PTR(-ENOMEM);
1826
1827	spin_lock(&swap_lock);
1828	for (type = 0; type < nr_swapfiles; type++) {
1829		if (!(swap_info[type]->flags & SWP_USED))
1830			break;
1831	}
1832	if (type >= MAX_SWAPFILES) {
1833		spin_unlock(&swap_lock);
1834		kfree(p);
1835		return ERR_PTR(-EPERM);
1836	}
1837	if (type >= nr_swapfiles) {
1838		p->type = type;
1839		swap_info[type] = p;
1840		/*
1841		 * Write swap_info[type] before nr_swapfiles, in case a
1842		 * racing procfs swap_start() or swap_next() is reading them.
1843		 * (We never shrink nr_swapfiles, we never free this entry.)
1844		 */
1845		smp_wmb();
1846		nr_swapfiles++;
1847	} else {
1848		kfree(p);
1849		p = swap_info[type];
1850		/*
1851		 * Do not memset this entry: a racing procfs swap_next()
1852		 * would be relying on p->type to remain valid.
1853		 */
1854	}
1855	INIT_LIST_HEAD(&p->first_swap_extent.list);
1856	p->flags = SWP_USED;
1857	p->next = -1;
1858	spin_unlock(&swap_lock);
1859
1860	return p;
1861}
1862
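/*
 * Claim the backing store for exclusive swap use: a block device is
 * opened FMODE_EXCL and switched to PAGE_SIZE blocks, while a regular
 * file has i_mutex taken and is rejected with -EBUSY if it is already an
 * active swapfile.
 */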
1863static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
1864{
1865	int error;
1866
1867	if (S_ISBLK(inode->i_mode)) {
1868		p->bdev = bdgrab(I_BDEV(inode));
1869		error = blkdev_get(p->bdev,
1870				   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1871				   sys_swapon);
1872		if (error < 0) {
1873			p->bdev = NULL;
1874			return -EINVAL;
1875		}
1876		p->old_block_size = block_size(p->bdev);
1877		error = set_blocksize(p->bdev, PAGE_SIZE);
1878		if (error < 0)
1879			return error;
1880		p->flags |= SWP_BLKDEV;
1881	} else if (S_ISREG(inode->i_mode)) {
1882		p->bdev = inode->i_sb->s_bdev;
1883		mutex_lock(&inode->i_mutex);
1884		if (IS_SWAPFILE(inode))
1885			return -EBUSY;
1886	} else
1887		return -EINVAL;
1888
1889	return 0;
1890}
1891
1892static unsigned long read_swap_header(struct swap_info_struct *p,
1893					union swap_header *swap_header,
1894					struct inode *inode)
1895{
1896	int i;
1897	unsigned long maxpages;
1898	unsigned long swapfilepages;
1899
1900	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1901		printk(KERN_ERR "Unable to find swap-space signature\n");
1902		return 0;
1903	}
1904
1905	/* swap partition endianness hack... */
1906	if (swab32(swap_header->info.version) == 1) {
1907		swab32s(&swap_header->info.version);
1908		swab32s(&swap_header->info.last_page);
1909		swab32s(&swap_header->info.nr_badpages);
1910		for (i = 0; i < swap_header->info.nr_badpages; i++)
1911			swab32s(&swap_header->info.badpages[i]);
1912	}
1913	/* Check the swap header's sub-version */
1914	if (swap_header->info.version != 1) {
1915		printk(KERN_WARNING
1916		       "Unable to handle swap header version %d\n",
1917		       swap_header->info.version);
1918		return 0;
1919	}
1920
1921	p->lowest_bit  = 1;
1922	p->cluster_next = 1;
1923	p->cluster_nr = 0;
1924
1925	/*
1926	 * Find out how many pages are allowed for a single swap
1927	 * device. There are three limiting factors: 1) the number
1928	 * of bits for the swap offset in the swp_entry_t type, and
1929	 * 2) the number of bits in the swap pte as defined by the
1930	 * different architectures, and 3) the number of free bits
1931	 * in an exceptional radix_tree entry. In order to find the
1932	 * largest possible bit mask, a swap entry with swap type 0
1933	 * and swap offset ~0UL is created, encoded to a swap pte,
1934	 * decoded to a swp_entry_t again, and finally the swap
1935	 * offset is extracted. This will mask all the bits from
1936	 * the initial ~0UL mask that can't be encoded in either
1937	 * the swp_entry_t or the architecture definition of a
1938	 * swap pte.  Then the same is done for a radix_tree entry.
1939	 */
1940	maxpages = swp_offset(pte_to_swp_entry(
1941			swp_entry_to_pte(swp_entry(0, ~0UL))));
1942	maxpages = swp_offset(radix_to_swp_entry(
1943			swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
1944
1945	if (maxpages > swap_header->info.last_page) {
1946		maxpages = swap_header->info.last_page + 1;
1947		/* p->max is an unsigned int: don't overflow it */
1948		if ((unsigned int)maxpages == 0)
1949			maxpages = UINT_MAX;
1950	}
1951	p->highest_bit = maxpages - 1;
1952
1953	if (!maxpages)
1954		return 0;
1955	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1956	if (swapfilepages && maxpages > swapfilepages) {
1957		printk(KERN_WARNING
1958		       "Swap area shorter than signature indicates\n");
1959		return 0;
1960	}
1961	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1962		return 0;
1963	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1964		return 0;
1965
1966	return maxpages;
1967}
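/*
 * Worked example of the clamping above (illustrative numbers): if the
 * swap pte and the exceptional radix_tree entry can both encode offsets
 * far larger than the device, and the header of a 4GiB swap area with
 * 4KiB pages reports last_page == 1048575, then maxpages is clamped to
 * last_page + 1 == 1048576 and highest_bit becomes 1048575.  A regular
 * file backing the area must then span at least maxpages pages, or
 * swapon fails with "Swap area shorter than signature indicates".
 */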
1968
1969static int setup_swap_map_and_extents(struct swap_info_struct *p,
1970					union swap_header *swap_header,
1971					unsigned char *swap_map,
1972					unsigned long maxpages,
1973					sector_t *span)
1974{
1975	int i;
1976	unsigned int nr_good_pages;
1977	int nr_extents;
1978
1979	nr_good_pages = maxpages - 1;	/* omit header page */
1980
1981	for (i = 0; i < swap_header->info.nr_badpages; i++) {
1982		unsigned int page_nr = swap_header->info.badpages[i];
1983		if (page_nr == 0 || page_nr > swap_header->info.last_page)
1984			return -EINVAL;
1985		if (page_nr < maxpages) {
1986			swap_map[page_nr] = SWAP_MAP_BAD;
1987			nr_good_pages--;
1988		}
1989	}
1990
1991	if (nr_good_pages) {
1992		swap_map[0] = SWAP_MAP_BAD;
1993		p->max = maxpages;
1994		p->pages = nr_good_pages;
1995		nr_extents = setup_swap_extents(p, span);
1996		if (nr_extents < 0)
1997			return nr_extents;
1998		nr_good_pages = p->pages;
1999	}
2000	if (!nr_good_pages) {
2001		printk(KERN_WARNING "Empty swap-file\n");
2002		return -EINVAL;
2003	}
2004
2005	return nr_extents;
2006}
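/*
 * Illustrative result of the setup above (assuming setup_swap_extents()
 * does not trim the area further): for a 5-page area whose header lists
 * page 3 as bad, the map ends up as
 *
 *	swap_map[] = { SWAP_MAP_BAD, 0, 0, SWAP_MAP_BAD, 0 }
 *
 * i.e. slot 0 is reserved for the header, slot 3 is unusable, and
 * p->pages == 3 slots remain allocatable.
 */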
2007
2008SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2009{
2010	struct swap_info_struct *p;
2011	char *name;
2012	struct file *swap_file = NULL;
2013	struct address_space *mapping;
2014	int i;
2015	int prio;
2016	int error;
2017	union swap_header *swap_header;
2018	int nr_extents;
2019	sector_t span;
2020	unsigned long maxpages;
2021	unsigned char *swap_map = NULL;
2022	struct page *page = NULL;
2023	struct inode *inode = NULL;
2024
2025	if (!capable(CAP_SYS_ADMIN))
2026		return -EPERM;
2027
2028	p = alloc_swap_info();
2029	if (IS_ERR(p))
2030		return PTR_ERR(p);
2031
2032	name = getname(specialfile);
2033	if (IS_ERR(name)) {
2034		error = PTR_ERR(name);
2035		name = NULL;
2036		goto bad_swap;
2037	}
2038	swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
2039	if (IS_ERR(swap_file)) {
2040		error = PTR_ERR(swap_file);
2041		swap_file = NULL;
2042		goto bad_swap;
2043	}
2044
2045	p->swap_file = swap_file;
2046	mapping = swap_file->f_mapping;
2047
2048	for (i = 0; i < nr_swapfiles; i++) {
2049		struct swap_info_struct *q = swap_info[i];
2050
2051		if (q == p || !q->swap_file)
2052			continue;
2053		if (mapping == q->swap_file->f_mapping) {
2054			error = -EBUSY;
2055			goto bad_swap;
2056		}
2057	}
2058
2059	inode = mapping->host;
2060	/* If S_ISREG(inode->i_mode), claim_swapfile() will take inode->i_mutex */
2061	error = claim_swapfile(p, inode);
2062	if (unlikely(error))
2063		goto bad_swap;
2064
2065	/*
2066	 * Read the swap header.
2067	 */
2068	if (!mapping->a_ops->readpage) {
2069		error = -EINVAL;
2070		goto bad_swap;
2071	}
2072	page = read_mapping_page(mapping, 0, swap_file);
2073	if (IS_ERR(page)) {
2074		error = PTR_ERR(page);
2075		goto bad_swap;
2076	}
2077	swap_header = kmap(page);
2078
2079	maxpages = read_swap_header(p, swap_header, inode);
2080	if (unlikely(!maxpages)) {
2081		error = -EINVAL;
2082		goto bad_swap;
2083	}
2084
2085	/* OK, set up the swap map and apply the bad block list */
2086	swap_map = vzalloc(maxpages);
2087	if (!swap_map) {
2088		error = -ENOMEM;
2089		goto bad_swap;
2090	}
2091
2092	error = swap_cgroup_swapon(p->type, maxpages);
2093	if (error)
2094		goto bad_swap;
2095
2096	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2097		maxpages, &span);
2098	if (unlikely(nr_extents < 0)) {
2099		error = nr_extents;
2100		goto bad_swap;
2101	}
2102
2103	if (p->bdev) {
2104		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2105			p->flags |= SWP_SOLIDSTATE;
2106			p->cluster_next = 1 + (random32() % p->highest_bit);
2107		}
2108		if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
2109			p->flags |= SWP_DISCARDABLE;
2110	}
2111
2112	mutex_lock(&swapon_mutex);
2113	prio = -1;
2114	if (swap_flags & SWAP_FLAG_PREFER)
2115		prio =
2116		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2117	enable_swap_info(p, prio, swap_map);
2118
2119	printk(KERN_INFO "Adding %uk swap on %s.  "
2120			"Priority:%d extents:%d across:%lluk %s%s\n",
2121		p->pages<<(PAGE_SHIFT-10), name, p->prio,
2122		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2123		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2124		(p->flags & SWP_DISCARDABLE) ? "D" : "");
2125
2126	mutex_unlock(&swapon_mutex);
2127	atomic_inc(&proc_poll_event);
2128	wake_up_interruptible(&proc_poll_wait);
2129
2130	if (S_ISREG(inode->i_mode))
2131		inode->i_flags |= S_SWAPFILE;
2132	error = 0;
2133	goto out;
2134bad_swap:
2135	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2136		set_blocksize(p->bdev, p->old_block_size);
2137		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2138	}
2139	destroy_swap_extents(p);
2140	swap_cgroup_swapoff(p->type);
2141	spin_lock(&swap_lock);
2142	p->swap_file = NULL;
2143	p->flags = 0;
2144	spin_unlock(&swap_lock);
2145	vfree(swap_map);
2146	if (swap_file) {
2147		if (inode && S_ISREG(inode->i_mode)) {
2148			mutex_unlock(&inode->i_mutex);
2149			inode = NULL;
2150		}
2151		filp_close(swap_file, NULL);
2152	}
2153out:
2154	if (page && !IS_ERR(page)) {
2155		kunmap(page);
2156		page_cache_release(page);
2157	}
2158	if (name)
2159		putname(name);
2160	if (inode && S_ISREG(inode->i_mode))
2161		mutex_unlock(&inode->i_mutex);
2162	return error;
2163}
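/*
 * Typical use from userspace (a sketch, not part of this file): the area
 * is first formatted with mkswap(8), which writes the SWAPSPACE2 signature
 * that read_swap_header() checks above, and is then activated with e.g.
 *
 *	swapon("/dev/sdb2", SWAP_FLAG_PREFER | (10 & SWAP_FLAG_PRIO_MASK));
 *
 * to request priority 10.  Without SWAP_FLAG_PREFER the area gets the next
 * default priority from least_priority (negative and decreasing), and
 * SWAP_FLAG_DISCARD additionally asks for SWP_DISCARDABLE when the whole
 * area could be discarded successfully.
 */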
2164
2165void si_swapinfo(struct sysinfo *val)
2166{
2167	unsigned int type;
2168	unsigned long nr_to_be_unused = 0;
2169
2170	spin_lock(&swap_lock);
2171	for (type = 0; type < nr_swapfiles; type++) {
2172		struct swap_info_struct *si = swap_info[type];
2173
2174		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2175			nr_to_be_unused += si->inuse_pages;
2176	}
2177	val->freeswap = nr_swap_pages + nr_to_be_unused;
2178	val->totalswap = total_swap_pages + nr_to_be_unused;
2179	spin_unlock(&swap_lock);
2180}
2181
2182/*
2183 * Verify that a swap entry is valid and increment its swap map count.
2184 *
2185 * Returns error code in following case.
2186 * - success -> 0
2187 * - swp_entry is invalid -> EINVAL
2188 * - swp_entry is migration entry -> EINVAL
2189 * - swap-cache reference is requested but there is already one. -> EEXIST
2190 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2191 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2192 */
2193static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2194{
2195	struct swap_info_struct *p;
2196	unsigned long offset, type;
2197	unsigned char count;
2198	unsigned char has_cache;
2199	int err = -EINVAL;
2200
2201	if (non_swap_entry(entry))
2202		goto out;
2203
2204	type = swp_type(entry);
2205	if (type >= nr_swapfiles)
2206		goto bad_file;
2207	p = swap_info[type];
2208	offset = swp_offset(entry);
2209
2210	spin_lock(&swap_lock);
2211	if (unlikely(offset >= p->max))
2212		goto unlock_out;
2213
2214	count = p->swap_map[offset];
2215	has_cache = count & SWAP_HAS_CACHE;
2216	count &= ~SWAP_HAS_CACHE;
2217	err = 0;
2218
2219	if (usage == SWAP_HAS_CACHE) {
2220
2221		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
2222		if (!has_cache && count)
2223			has_cache = SWAP_HAS_CACHE;
2224		else if (has_cache)		/* someone else added cache */
2225			err = -EEXIST;
2226		else				/* no users remaining */
2227			err = -ENOENT;
2228
2229	} else if (count || has_cache) {
2230
2231		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2232			count += usage;
2233		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2234			err = -EINVAL;
2235		else if (swap_count_continued(p, offset, count))
2236			count = COUNT_CONTINUED;
2237		else
2238			err = -ENOMEM;
2239	} else
2240		err = -ENOENT;			/* unused swap entry */
2241
2242	p->swap_map[offset] = count | has_cache;
2243
2244unlock_out:
2245	spin_unlock(&swap_lock);
2246out:
2247	return err;
2248
2249bad_file:
2250	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2251	goto out;
2252}
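/*
 * Worked example of the swap_map encoding handled above, using the usual
 * <linux/swap.h> values (SWAP_HAS_CACHE 0x40, SWAP_MAP_MAX 0x3e,
 * COUNT_CONTINUED 0x80): a byte of 0x41 means one pte reference plus a
 * swapcache page.  Duplicating it with usage == 1 gives 0x42, while
 * another SWAP_HAS_CACHE request on the same slot returns -EEXIST.  Once
 * the low count would pass SWAP_MAP_MAX, COUNT_CONTINUED is set and the
 * excess is carried into a continuation page (see the end of this file).
 */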
2253
2254/*
2255 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2256 * (in which case its reference count is never incremented).
2257 */
2258void swap_shmem_alloc(swp_entry_t entry)
2259{
2260	__swap_duplicate(entry, SWAP_MAP_SHMEM);
2261}
2262
2263/*
2264 * Increase reference count of swap entry by 1.
2265 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2266 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
2267 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2268 * might occur if a page table entry has got corrupted.
2269 */
2270int swap_duplicate(swp_entry_t entry)
2271{
2272	int err = 0;
2273
2274	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2275		err = add_swap_count_continuation(entry, GFP_ATOMIC);
2276	return err;
2277}
2278
2279/*
2280 * @entry: swap entry for which we allocate swap cache.
2281 *
2282 * Called when allocating swap cache for an existing swap entry.
2283 * This can return error codes; it returns 0 on success.
2284 * -EEXIST means there is already a swap cache.
2285 * Note: return code is different from swap_duplicate().
2286 */
2287int swapcache_prepare(swp_entry_t entry)
2288{
2289	return __swap_duplicate(entry, SWAP_HAS_CACHE);
2290}
2291
2292/*
2293 * swap_lock prevents swap_map being freed. Don't grab an extra
2294 * reference on the swaphandle, it doesn't matter if it becomes unused.
2295 */
2296int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2297{
2298	struct swap_info_struct *si;
2299	int our_page_cluster = page_cluster;
2300	pgoff_t target, toff;
2301	pgoff_t base, end;
2302	int nr_pages = 0;
2303
2304	if (!our_page_cluster)	/* no readahead */
2305		return 0;
2306
2307	si = swap_info[swp_type(entry)];
2308	target = swp_offset(entry);
2309	base = (target >> our_page_cluster) << our_page_cluster;
2310	end = base + (1 << our_page_cluster);
2311	if (!base)		/* first page is swap header */
2312		base++;
2313
2314	spin_lock(&swap_lock);
2315	if (end > si->max)	/* don't go beyond end of map */
2316		end = si->max;
2317
2318	/* Count contiguous allocated slots above our target */
2319	for (toff = target; ++toff < end; nr_pages++) {
2320		/* Don't read in free or bad pages */
2321		if (!si->swap_map[toff])
2322			break;
2323		if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2324			break;
2325	}
2326	/* Count contiguous allocated slots below our target */
2327	for (toff = target; --toff >= base; nr_pages++) {
2328		/* Don't read in free or bad pages */
2329		if (!si->swap_map[toff])
2330			break;
2331		if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2332			break;
2333	}
2334	spin_unlock(&swap_lock);
2335
2336	/*
2337	 * Indicate starting offset, and return number of pages to get:
2338	 * if only 1, say 0, since there's then no readahead to be done.
2339	 */
2340	*offset = ++toff;
2341	return nr_pages? ++nr_pages: 0;
2342}
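/*
 * Readahead window example (illustrative): with page_cluster == 3 the
 * window is 1 << 3 == 8 slots, aligned to an 8-slot boundary, so a target
 * offset of 21 gives [16, 24).  If slots 19..23 are allocated but slot 18
 * is free, the downward scan stops at 18 and the upward scan at 24:
 * *offset is set to 19 and 5 pages are returned for swapin readahead.
 * If no neighbouring slot is allocated, 0 is returned and no readahead
 * is done.
 */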
2343
2344/*
2345 * add_swap_count_continuation - called when a swap count is duplicated
2346 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2347 * page of the original vmalloc'ed swap_map, to hold the continuation count
2348 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2349 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2350 *
2351 * These continuation pages are seldom referenced: the common paths all work
2352 * on the original swap_map, only referring to a continuation page when the
2353 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2354 *
2355 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2356 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2357 * can be called after dropping locks.
2358 */
2359int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2360{
2361	struct swap_info_struct *si;
2362	struct page *head;
2363	struct page *page;
2364	struct page *list_page;
2365	pgoff_t offset;
2366	unsigned char count;
2367
2368	/*
2369	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2370	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2371	 */
2372	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2373
2374	si = swap_info_get(entry);
2375	if (!si) {
2376		/*
2377		 * An acceptable race has occurred since the failing
2378		 * __swap_duplicate(): the swap entry has been freed,
2379		 * perhaps even the whole swap_map cleared for swapoff.
2380		 */
2381		goto outer;
2382	}
2383
2384	offset = swp_offset(entry);
2385	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2386
2387	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2388		/*
2389		 * The higher the swap count, the more likely it is that tasks
2390		 * will race to add swap count continuation: we need to avoid
2391		 * over-provisioning.
2392		 */
2393		goto out;
2394	}
2395
2396	if (!page) {
2397		spin_unlock(&swap_lock);
2398		return -ENOMEM;
2399	}
2400
2401	/*
2402	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2403	 * no architecture is using highmem pages for kernel pagetables: so it
2404	 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2405	 */
2406	head = vmalloc_to_page(si->swap_map + offset);
2407	offset &= ~PAGE_MASK;
2408
2409	/*
2410	 * Page allocation does not initialize the page's lru field,
2411	 * but it does always reset its private field.
2412	 */
2413	if (!page_private(head)) {
2414		BUG_ON(count & COUNT_CONTINUED);
2415		INIT_LIST_HEAD(&head->lru);
2416		set_page_private(head, SWP_CONTINUED);
2417		si->flags |= SWP_CONTINUED;
2418	}
2419
2420	list_for_each_entry(list_page, &head->lru, lru) {
2421		unsigned char *map;
2422
2423		/*
2424		 * If the previous map said no continuation, but we've found
2425		 * a continuation page, free our allocation and use this one.
2426		 */
2427		if (!(count & COUNT_CONTINUED))
2428			goto out;
2429
2430		map = kmap_atomic(list_page, KM_USER0) + offset;
2431		count = *map;
2432		kunmap_atomic(map, KM_USER0);
2433
2434		/*
2435		 * If this continuation count now has some space in it,
2436		 * free our allocation and use this one.
2437		 */
2438		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2439			goto out;
2440	}
2441
2442	list_add_tail(&page->lru, &head->lru);
2443	page = NULL;			/* now it's attached, don't free it */
2444out:
2445	spin_unlock(&swap_lock);
2446outer:
2447	if (page)
2448		__free_page(page);
2449	return 0;
2450}
2451
2452/*
2453 * swap_count_continued - when the original swap_map count is incremented
2454 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2455 * into, carry if so, or else fail until a new continuation page is allocated;
2456 * when the original swap_map count is decremented from 0 with continuation,
2457 * borrow from the continuation and report whether it still holds more.
2458 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2459 */
2460static bool swap_count_continued(struct swap_info_struct *si,
2461				 pgoff_t offset, unsigned char count)
2462{
2463	struct page *head;
2464	struct page *page;
2465	unsigned char *map;
2466
2467	head = vmalloc_to_page(si->swap_map + offset);
2468	if (page_private(head) != SWP_CONTINUED) {
2469		BUG_ON(count & COUNT_CONTINUED);
2470		return false;		/* need to add count continuation */
2471	}
2472
2473	offset &= ~PAGE_MASK;
2474	page = list_entry(head->lru.next, struct page, lru);
2475	map = kmap_atomic(page, KM_USER0) + offset;
2476
2477	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
2478		goto init_map;		/* jump over SWAP_CONT_MAX checks */
2479
2480	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2481		/*
2482		 * Think of how you add 1 to 999
2483		 */
2484		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2485			kunmap_atomic(map, KM_USER0);
2486			page = list_entry(page->lru.next, struct page, lru);
2487			BUG_ON(page == head);
2488			map = kmap_atomic(page, KM_USER0) + offset;
2489		}
2490		if (*map == SWAP_CONT_MAX) {
2491			kunmap_atomic(map, KM_USER0);
2492			page = list_entry(page->lru.next, struct page, lru);
2493			if (page == head)
2494				return false;	/* add count continuation */
2495			map = kmap_atomic(page, KM_USER0) + offset;
2496init_map:		*map = 0;		/* we didn't zero the page */
2497		}
2498		*map += 1;
2499		kunmap_atomic(map, KM_USER0);
2500		page = list_entry(page->lru.prev, struct page, lru);
2501		while (page != head) {
2502			map = kmap_atomic(page, KM_USER0) + offset;
2503			*map = COUNT_CONTINUED;
2504			kunmap_atomic(map, KM_USER0);
2505			page = list_entry(page->lru.prev, struct page, lru);
2506		}
2507		return true;			/* incremented */
2508
2509	} else {				/* decrementing */
2510		/*
2511		 * Think of how you subtract 1 from 1000
2512		 */
2513		BUG_ON(count != COUNT_CONTINUED);
2514		while (*map == COUNT_CONTINUED) {
2515			kunmap_atomic(map, KM_USER0);
2516			page = list_entry(page->lru.next, struct page, lru);
2517			BUG_ON(page == head);
2518			map = kmap_atomic(page, KM_USER0) + offset;
2519		}
2520		BUG_ON(*map == 0);
2521		*map -= 1;
2522		if (*map == 0)
2523			count = 0;
2524		kunmap_atomic(map, KM_USER0);
2525		page = list_entry(page->lru.prev, struct page, lru);
2526		while (page != head) {
2527			map = kmap_atomic(page, KM_USER0) + offset;
2528			*map = SWAP_CONT_MAX | count;
2529			count = COUNT_CONTINUED;
2530			kunmap_atomic(map, KM_USER0);
2531			page = list_entry(page->lru.prev, struct page, lru);
2532		}
2533		return count == COUNT_CONTINUED;
2534	}
2535}
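/*
 * Worked example of the carry scheme above: the swap_map byte itself holds
 * counts up to SWAP_MAP_MAX (0x3e), and each continuation page holds a
 * further digit of up to SWAP_CONT_MAX (0x7f) for the same offset.  The
 * first duplicate beyond 0x3e therefore leaves COUNT_CONTINUED set in the
 * swap_map byte and writes 1 into the continuation byte, much as 99 + 1
 * carries into a new digit; freeing borrows back from the continuation
 * byte until it reaches 0, when COUNT_CONTINUED is cleared again.
 */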
2536
2537/*
2538 * free_swap_count_continuations - swapoff free all the continuation pages
2539 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2540 */
2541static void free_swap_count_continuations(struct swap_info_struct *si)
2542{
2543	pgoff_t offset;
2544
2545	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2546		struct page *head;
2547		head = vmalloc_to_page(si->swap_map + offset);
2548		if (page_private(head)) {
2549			struct list_head *this, *next;
2550			list_for_each_safe(this, next, &head->lru) {
2551				struct page *page;
2552				page = list_entry(this, struct page, lru);
2553				list_del(this);
2554				__free_page(page);
2555			}
2556		}
2557	}
2558}
v3.15
   1/*
   2 *  linux/mm/swapfile.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *  Swap reorganised 29.12.95, Stephen Tweedie
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/hugetlb.h>
  10#include <linux/mman.h>
  11#include <linux/slab.h>
  12#include <linux/kernel_stat.h>
  13#include <linux/swap.h>
  14#include <linux/vmalloc.h>
  15#include <linux/pagemap.h>
  16#include <linux/namei.h>
  17#include <linux/shmem_fs.h>
  18#include <linux/blkdev.h>
  19#include <linux/random.h>
  20#include <linux/writeback.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/init.h>
  24#include <linux/ksm.h>
  25#include <linux/rmap.h>
  26#include <linux/security.h>
  27#include <linux/backing-dev.h>
  28#include <linux/mutex.h>
  29#include <linux/capability.h>
  30#include <linux/syscalls.h>
  31#include <linux/memcontrol.h>
  32#include <linux/poll.h>
  33#include <linux/oom.h>
  34#include <linux/frontswap.h>
  35#include <linux/swapfile.h>
  36#include <linux/export.h>
  37
  38#include <asm/pgtable.h>
  39#include <asm/tlbflush.h>
  40#include <linux/swapops.h>
  41#include <linux/page_cgroup.h>
  42
  43static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  44				 unsigned char);
  45static void free_swap_count_continuations(struct swap_info_struct *);
  46static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  47
  48DEFINE_SPINLOCK(swap_lock);
  49static unsigned int nr_swapfiles;
  50atomic_long_t nr_swap_pages;
  51/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  52long total_swap_pages;
  53static int least_priority;
  54static atomic_t highest_priority_index = ATOMIC_INIT(-1);
  55
  56static const char Bad_file[] = "Bad swap file entry ";
  57static const char Unused_file[] = "Unused swap file entry ";
  58static const char Bad_offset[] = "Bad swap offset entry ";
  59static const char Unused_offset[] = "Unused swap offset entry ";
  60
  61struct swap_list_t swap_list = {-1, -1};
  62
  63struct swap_info_struct *swap_info[MAX_SWAPFILES];
  64
  65static DEFINE_MUTEX(swapon_mutex);
  66
  67static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  68/* Activity counter to indicate that a swapon or swapoff has occurred */
  69static atomic_t proc_poll_event = ATOMIC_INIT(0);
  70
  71static inline unsigned char swap_count(unsigned char ent)
  72{
  73	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
  74}
  75
  76/* returns 1 if swap entry is freed */
  77static int
  78__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
  79{
  80	swp_entry_t entry = swp_entry(si->type, offset);
  81	struct page *page;
  82	int ret = 0;
  83
  84	page = find_get_page(swap_address_space(entry), entry.val);
  85	if (!page)
  86		return 0;
  87	/*
  88	 * This function is called from scan_swap_map() and it's called
  89	 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
  90	 * We have to use trylock for avoiding deadlock. This is a special
  91	 * case and you should use try_to_free_swap() with explicit lock_page()
  92	 * in usual operations.
  93	 */
  94	if (trylock_page(page)) {
  95		ret = try_to_free_swap(page);
  96		unlock_page(page);
  97	}
  98	page_cache_release(page);
  99	return ret;
 100}
 101
 102/*
  103 * swapon tells the device that all the old swap contents can be discarded,
 104 * to allow the swap device to optimize its wear-levelling.
 105 */
 106static int discard_swap(struct swap_info_struct *si)
 107{
 108	struct swap_extent *se;
 109	sector_t start_block;
 110	sector_t nr_blocks;
 111	int err = 0;
 112
 113	/* Do not discard the swap header page! */
 114	se = &si->first_swap_extent;
 115	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 116	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 117	if (nr_blocks) {
 118		err = blkdev_issue_discard(si->bdev, start_block,
 119				nr_blocks, GFP_KERNEL, 0);
 120		if (err)
 121			return err;
 122		cond_resched();
 123	}
 124
 125	list_for_each_entry(se, &si->first_swap_extent.list, list) {
 126		start_block = se->start_block << (PAGE_SHIFT - 9);
 127		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 128
 129		err = blkdev_issue_discard(si->bdev, start_block,
 130				nr_blocks, GFP_KERNEL, 0);
 131		if (err)
 132			break;
 133
 134		cond_resched();
 135	}
 136	return err;		/* That will often be -EOPNOTSUPP */
 137}
 138
 139/*
  140 * swap allocation tells the device that a cluster of swap can now be discarded,
 141 * to allow the swap device to optimize its wear-levelling.
 142 */
 143static void discard_swap_cluster(struct swap_info_struct *si,
 144				 pgoff_t start_page, pgoff_t nr_pages)
 145{
 146	struct swap_extent *se = si->curr_swap_extent;
 147	int found_extent = 0;
 148
 149	while (nr_pages) {
 150		struct list_head *lh;
 151
 152		if (se->start_page <= start_page &&
 153		    start_page < se->start_page + se->nr_pages) {
 154			pgoff_t offset = start_page - se->start_page;
 155			sector_t start_block = se->start_block + offset;
 156			sector_t nr_blocks = se->nr_pages - offset;
 157
 158			if (nr_blocks > nr_pages)
 159				nr_blocks = nr_pages;
 160			start_page += nr_blocks;
 161			nr_pages -= nr_blocks;
 162
 163			if (!found_extent++)
 164				si->curr_swap_extent = se;
 165
 166			start_block <<= PAGE_SHIFT - 9;
 167			nr_blocks <<= PAGE_SHIFT - 9;
 168			if (blkdev_issue_discard(si->bdev, start_block,
 169				    nr_blocks, GFP_NOIO, 0))
 170				break;
 171		}
 172
 173		lh = se->list.next;
 174		se = list_entry(lh, struct swap_extent, list);
 175	}
 176}
 177
 178#define SWAPFILE_CLUSTER	256
 179#define LATENCY_LIMIT		256
 180
 181static inline void cluster_set_flag(struct swap_cluster_info *info,
 182	unsigned int flag)
 183{
 184	info->flags = flag;
 185}
 186
 187static inline unsigned int cluster_count(struct swap_cluster_info *info)
 188{
 189	return info->data;
 190}
 191
 192static inline void cluster_set_count(struct swap_cluster_info *info,
 193				     unsigned int c)
 194{
 195	info->data = c;
 196}
 197
 198static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 199					 unsigned int c, unsigned int f)
 200{
 201	info->flags = f;
 202	info->data = c;
 203}
 204
 205static inline unsigned int cluster_next(struct swap_cluster_info *info)
 206{
 207	return info->data;
 208}
 209
 210static inline void cluster_set_next(struct swap_cluster_info *info,
 211				    unsigned int n)
 212{
 213	info->data = n;
 214}
 215
 216static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 217					 unsigned int n, unsigned int f)
 218{
 219	info->flags = f;
 220	info->data = n;
 221}
 222
 223static inline bool cluster_is_free(struct swap_cluster_info *info)
 224{
 225	return info->flags & CLUSTER_FLAG_FREE;
 226}
 227
 228static inline bool cluster_is_null(struct swap_cluster_info *info)
 229{
 230	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 231}
 232
 233static inline void cluster_set_null(struct swap_cluster_info *info)
 234{
 235	info->flags = CLUSTER_FLAG_NEXT_NULL;
 236	info->data = 0;
 237}
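/*
 * Example of the representation used by the helpers above (a sketch based
 * only on these accessors): each swap_cluster_info either records how many
 * of its cluster's SWAPFILE_CLUSTER slots are in use, or, while the
 * cluster sits on the free or discard list, the index of the next cluster
 * on that list, with the role kept in its flags.  A free-list head/tail of
 * {5, 9} with cluster_info[5] pointing at 9 thus describes a two-element
 * list of free clusters 5 and 9.
 */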
 238
 239/* Add a cluster to discard list and schedule it to do discard */
 240static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 241		unsigned int idx)
 242{
 243	/*
 244	 * If scan_swap_map() can't find a free cluster, it will check
 245	 * si->swap_map directly. To make sure the discarding cluster isn't
 246	 * taken by scan_swap_map(), mark the swap entries bad (occupied). It
  247	 * taken by scan_swap_map(), mark the swap entries bad (occupied). They
  248	 * will be cleared after the discard.
 249	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 250			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 251
 252	if (cluster_is_null(&si->discard_cluster_head)) {
 253		cluster_set_next_flag(&si->discard_cluster_head,
 254						idx, 0);
 255		cluster_set_next_flag(&si->discard_cluster_tail,
 256						idx, 0);
 257	} else {
 258		unsigned int tail = cluster_next(&si->discard_cluster_tail);
 259		cluster_set_next(&si->cluster_info[tail], idx);
 260		cluster_set_next_flag(&si->discard_cluster_tail,
 261						idx, 0);
 262	}
 263
 264	schedule_work(&si->discard_work);
 265}
 266
 267/*
  268 * Actually do the discard. After a cluster discard is finished, the cluster
  269 * will be added to the free cluster list. The caller should hold si->lock.
 270*/
 271static void swap_do_scheduled_discard(struct swap_info_struct *si)
 272{
 273	struct swap_cluster_info *info;
 274	unsigned int idx;
 275
 276	info = si->cluster_info;
 277
 278	while (!cluster_is_null(&si->discard_cluster_head)) {
 279		idx = cluster_next(&si->discard_cluster_head);
 280
 281		cluster_set_next_flag(&si->discard_cluster_head,
 282						cluster_next(&info[idx]), 0);
 283		if (cluster_next(&si->discard_cluster_tail) == idx) {
 284			cluster_set_null(&si->discard_cluster_head);
 285			cluster_set_null(&si->discard_cluster_tail);
 286		}
 287		spin_unlock(&si->lock);
 288
 289		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 290				SWAPFILE_CLUSTER);
 291
 292		spin_lock(&si->lock);
 293		cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
 294		if (cluster_is_null(&si->free_cluster_head)) {
 295			cluster_set_next_flag(&si->free_cluster_head,
 296						idx, 0);
 297			cluster_set_next_flag(&si->free_cluster_tail,
 298						idx, 0);
 299		} else {
 300			unsigned int tail;
 301
 302			tail = cluster_next(&si->free_cluster_tail);
 303			cluster_set_next(&info[tail], idx);
 304			cluster_set_next_flag(&si->free_cluster_tail,
 305						idx, 0);
 306		}
 307		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 308				0, SWAPFILE_CLUSTER);
 309	}
 310}
 311
 312static void swap_discard_work(struct work_struct *work)
 313{
 314	struct swap_info_struct *si;
 315
 316	si = container_of(work, struct swap_info_struct, discard_work);
 317
 318	spin_lock(&si->lock);
 319	swap_do_scheduled_discard(si);
 320	spin_unlock(&si->lock);
 321}
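/*
 * Example of the discard round trip above (illustrative): when cluster 7
 * is scheduled, its SWAPFILE_CLUSTER slots (offsets 1792..2047) are first
 * marked SWAP_MAP_BAD so concurrent scan_swap_map() calls skip them; the
 * queued work then drops si->lock around the blkdev_issue_discard() of
 * that range and, once it completes, puts cluster 7 on the free cluster
 * list and clears its slots back to zero.
 */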
 322
 323/*
 324 * The cluster corresponding to page_nr will be used. The cluster will be
 325 * removed from free cluster list and its usage counter will be increased.
 326 */
 327static void inc_cluster_info_page(struct swap_info_struct *p,
 328	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 329{
 330	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 331
 332	if (!cluster_info)
 333		return;
 334	if (cluster_is_free(&cluster_info[idx])) {
 335		VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
 336		cluster_set_next_flag(&p->free_cluster_head,
 337			cluster_next(&cluster_info[idx]), 0);
 338		if (cluster_next(&p->free_cluster_tail) == idx) {
 339			cluster_set_null(&p->free_cluster_tail);
 340			cluster_set_null(&p->free_cluster_head);
 341		}
 342		cluster_set_count_flag(&cluster_info[idx], 0, 0);
 343	}
 344
 345	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 346	cluster_set_count(&cluster_info[idx],
 347		cluster_count(&cluster_info[idx]) + 1);
 348}
 349
 350/*
 351 * The cluster corresponding to page_nr decreases one usage. If the usage
  352 * counter becomes 0, which means no page in the cluster is in use, we can
 353 * optionally discard the cluster and add it to free cluster list.
 354 */
 355static void dec_cluster_info_page(struct swap_info_struct *p,
 356	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 357{
 358	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 359
 360	if (!cluster_info)
 361		return;
 362
 363	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 364	cluster_set_count(&cluster_info[idx],
 365		cluster_count(&cluster_info[idx]) - 1);
 366
 367	if (cluster_count(&cluster_info[idx]) == 0) {
 368		/*
 369		 * If the swap is discardable, prepare discard the cluster
 370		 * instead of free it immediately. The cluster will be freed
 371		 * after discard.
 372		 */
 373		if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 374				 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 375			swap_cluster_schedule_discard(p, idx);
 376			return;
 377		}
 378
 379		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
 380		if (cluster_is_null(&p->free_cluster_head)) {
 381			cluster_set_next_flag(&p->free_cluster_head, idx, 0);
 382			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
 383		} else {
 384			unsigned int tail = cluster_next(&p->free_cluster_tail);
 385			cluster_set_next(&cluster_info[tail], idx);
 386			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
 387		}
 388	}
 389}
 390
 391/*
 392 * It's possible scan_swap_map() uses a free cluster in the middle of free
  393 * cluster list. Avoid such abuse to prevent list corruption.
 394 */
 395static bool
 396scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 397	unsigned long offset)
 398{
 399	struct percpu_cluster *percpu_cluster;
 400	bool conflict;
 401
 402	offset /= SWAPFILE_CLUSTER;
 403	conflict = !cluster_is_null(&si->free_cluster_head) &&
 404		offset != cluster_next(&si->free_cluster_head) &&
 405		cluster_is_free(&si->cluster_info[offset]);
 406
 407	if (!conflict)
 408		return false;
 409
 410	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 411	cluster_set_null(&percpu_cluster->index);
 412	return true;
 413}
 414
 415/*
 416 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 417 * might involve allocating a new cluster for current CPU too.
 418 */
 419static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 420	unsigned long *offset, unsigned long *scan_base)
 421{
 422	struct percpu_cluster *cluster;
 423	bool found_free;
 424	unsigned long tmp;
 425
 426new_cluster:
 427	cluster = this_cpu_ptr(si->percpu_cluster);
 428	if (cluster_is_null(&cluster->index)) {
 429		if (!cluster_is_null(&si->free_cluster_head)) {
 430			cluster->index = si->free_cluster_head;
 431			cluster->next = cluster_next(&cluster->index) *
 432					SWAPFILE_CLUSTER;
 433		} else if (!cluster_is_null(&si->discard_cluster_head)) {
 434			/*
 435			 * we don't have free cluster but have some clusters in
 436			 * discarding, do discard now and reclaim them
 437			 */
 438			swap_do_scheduled_discard(si);
 439			*scan_base = *offset = si->cluster_next;
 440			goto new_cluster;
 441		} else
 442			return;
 443	}
 444
 445	found_free = false;
 446
 447	/*
  448	 * Other CPUs can use our cluster if they can't find a free cluster;
  449	 * check whether there is still a free entry in the cluster.
 450	 */
 451	tmp = cluster->next;
 452	while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
 453	       SWAPFILE_CLUSTER) {
 454		if (!si->swap_map[tmp]) {
 455			found_free = true;
 456			break;
 457		}
 458		tmp++;
 459	}
 460	if (!found_free) {
 461		cluster_set_null(&cluster->index);
 462		goto new_cluster;
 463	}
 464	cluster->next = tmp + 1;
 465	*offset = tmp;
 466	*scan_base = tmp;
 467}
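/*
 * Example (illustrative): with SWAPFILE_CLUSTER == 256, a CPU that takes
 * free cluster 5 off the list will hand out offsets 1280..1535 from its
 * percpu_cluster, advancing cluster->next past any slots another CPU has
 * taken in the meantime; once no free slot is left in that cluster it
 * goes back to new_cluster and picks, or reclaims via discard, another one.
 */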
 468
 469static unsigned long scan_swap_map(struct swap_info_struct *si,
 470				   unsigned char usage)
 471{
 472	unsigned long offset;
 473	unsigned long scan_base;
 474	unsigned long last_in_cluster = 0;
 475	int latency_ration = LATENCY_LIMIT;
 476
 477	/*
 478	 * We try to cluster swap pages by allocating them sequentially
 479	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 480	 * way, however, we resort to first-free allocation, starting
 481	 * a new cluster.  This prevents us from scattering swap pages
 482	 * all over the entire swap partition, so that we reduce
 483	 * overall disk seek times between swap pages.  -- sct
 484	 * But we do now try to find an empty cluster.  -Andrea
 485	 * And we let swap pages go all over an SSD partition.  Hugh
 486	 */
 487
 488	si->flags += SWP_SCANNING;
 489	scan_base = offset = si->cluster_next;
 490
 491	/* SSD algorithm */
 492	if (si->cluster_info) {
 493		scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
 494		goto checks;
 495	}
 496
 497	if (unlikely(!si->cluster_nr--)) {
 498		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 499			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 500			goto checks;
 501		}
 502
 503		spin_unlock(&si->lock);
 504
 505		/*
 506		 * If seek is expensive, start searching for new cluster from
 507		 * start of partition, to minimize the span of allocated swap.
 508		 * But if seek is cheap, search from our current position, so
 509		 * that swap is allocated from all over the partition: if the
 510		 * Flash Translation Layer only remaps within limited zones,
 511		 * we don't want to wear out the first zone too quickly.
 512		 */
 513		if (!(si->flags & SWP_SOLIDSTATE))
 514			scan_base = offset = si->lowest_bit;
 515		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 516
 517		/* Locate the first empty (unaligned) cluster */
 518		for (; last_in_cluster <= si->highest_bit; offset++) {
 519			if (si->swap_map[offset])
 520				last_in_cluster = offset + SWAPFILE_CLUSTER;
 521			else if (offset == last_in_cluster) {
 522				spin_lock(&si->lock);
 523				offset -= SWAPFILE_CLUSTER - 1;
 524				si->cluster_next = offset;
 525				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 526				goto checks;
 527			}
 528			if (unlikely(--latency_ration < 0)) {
 529				cond_resched();
 530				latency_ration = LATENCY_LIMIT;
 531			}
 532		}
 533
 534		offset = si->lowest_bit;
 535		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 536
 537		/* Locate the first empty (unaligned) cluster */
 538		for (; last_in_cluster < scan_base; offset++) {
 539			if (si->swap_map[offset])
 540				last_in_cluster = offset + SWAPFILE_CLUSTER;
 541			else if (offset == last_in_cluster) {
 542				spin_lock(&si->lock);
 543				offset -= SWAPFILE_CLUSTER - 1;
 544				si->cluster_next = offset;
 545				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 546				goto checks;
 547			}
 548			if (unlikely(--latency_ration < 0)) {
 549				cond_resched();
 550				latency_ration = LATENCY_LIMIT;
 551			}
 552		}
 553
 554		offset = scan_base;
 555		spin_lock(&si->lock);
 556		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 557	}
 558
 559checks:
 560	if (si->cluster_info) {
 561		while (scan_swap_map_ssd_cluster_conflict(si, offset))
 562			scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
 563	}
 564	if (!(si->flags & SWP_WRITEOK))
 565		goto no_page;
 566	if (!si->highest_bit)
 567		goto no_page;
 568	if (offset > si->highest_bit)
 569		scan_base = offset = si->lowest_bit;
 570
 571	/* reuse swap entry of cache-only swap if not busy. */
 572	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 573		int swap_was_freed;
 574		spin_unlock(&si->lock);
 575		swap_was_freed = __try_to_reclaim_swap(si, offset);
 576		spin_lock(&si->lock);
 577		/* entry was freed successfully, try to use this again */
 578		if (swap_was_freed)
 579			goto checks;
 580		goto scan; /* check next one */
 581	}
 582
 583	if (si->swap_map[offset])
 584		goto scan;
 585
 586	if (offset == si->lowest_bit)
 587		si->lowest_bit++;
 588	if (offset == si->highest_bit)
 589		si->highest_bit--;
 590	si->inuse_pages++;
 591	if (si->inuse_pages == si->pages) {
 592		si->lowest_bit = si->max;
 593		si->highest_bit = 0;
 594	}
 595	si->swap_map[offset] = usage;
 596	inc_cluster_info_page(si, si->cluster_info, offset);
 597	si->cluster_next = offset + 1;
 598	si->flags -= SWP_SCANNING;
 599
 600	return offset;
 601
 602scan:
 603	spin_unlock(&si->lock);
 604	while (++offset <= si->highest_bit) {
 605		if (!si->swap_map[offset]) {
 606			spin_lock(&si->lock);
 607			goto checks;
 608		}
 609		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 610			spin_lock(&si->lock);
 611			goto checks;
 612		}
 613		if (unlikely(--latency_ration < 0)) {
 614			cond_resched();
 615			latency_ration = LATENCY_LIMIT;
 616		}
 617	}
 618	offset = si->lowest_bit;
 619	while (offset < scan_base) {
 620		if (!si->swap_map[offset]) {
 621			spin_lock(&si->lock);
 622			goto checks;
 623		}
 624		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 625			spin_lock(&si->lock);
 626			goto checks;
 627		}
 628		if (unlikely(--latency_ration < 0)) {
 629			cond_resched();
 630			latency_ration = LATENCY_LIMIT;
 631		}
 632		offset++;
 633	}
 634	spin_lock(&si->lock);
 635
 636no_page:
 637	si->flags -= SWP_SCANNING;
 638	return 0;
 639}
 640
 641swp_entry_t get_swap_page(void)
 642{
 643	struct swap_info_struct *si;
 644	pgoff_t offset;
 645	int type, next;
 646	int wrapped = 0;
 647	int hp_index;
 648
 649	spin_lock(&swap_lock);
 650	if (atomic_long_read(&nr_swap_pages) <= 0)
 651		goto noswap;
 652	atomic_long_dec(&nr_swap_pages);
 653
 654	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
 655		hp_index = atomic_xchg(&highest_priority_index, -1);
 656		/*
 657		 * highest_priority_index records current highest priority swap
 658		 * type which just frees swap entries. If its priority is
 659		 * higher than that of swap_list.next swap type, we use it.  It
 660		 * isn't protected by swap_lock, so it can be an invalid value
 661		 * if the corresponding swap type is swapoff. We double check
 662		 * the flags here. It's even possible the swap type is swapoff
 663		 * and swapon again and its priority is changed. In such rare
  664		 * case, low priority swap type might be used, but eventually
 665		 * high priority swap will be used after several rounds of
 666		 * swap.
 667		 */
 668		if (hp_index != -1 && hp_index != type &&
 669		    swap_info[type]->prio < swap_info[hp_index]->prio &&
 670		    (swap_info[hp_index]->flags & SWP_WRITEOK)) {
 671			type = hp_index;
 672			swap_list.next = type;
 673		}
 674
 675		si = swap_info[type];
 676		next = si->next;
 677		if (next < 0 ||
 678		    (!wrapped && si->prio != swap_info[next]->prio)) {
 679			next = swap_list.head;
 680			wrapped++;
 681		}
 682
 683		spin_lock(&si->lock);
 684		if (!si->highest_bit) {
 685			spin_unlock(&si->lock);
 686			continue;
 687		}
 688		if (!(si->flags & SWP_WRITEOK)) {
 689			spin_unlock(&si->lock);
 690			continue;
 691		}
 692
 693		swap_list.next = next;
 694
 695		spin_unlock(&swap_lock);
 696		/* This is called for allocating swap entry for cache */
 697		offset = scan_swap_map(si, SWAP_HAS_CACHE);
 698		spin_unlock(&si->lock);
 699		if (offset)
 700			return swp_entry(type, offset);
 701		spin_lock(&swap_lock);
 702		next = swap_list.next;
 703	}
 704
 705	atomic_long_inc(&nr_swap_pages);
 706noswap:
 707	spin_unlock(&swap_lock);
 708	return (swp_entry_t) {0};
 709}
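/*
 * Allocation order example (illustrative): with two swap areas of equal
 * priority, swap_list.next is moved on to the other area before each
 * scan_swap_map() call, so successive allocations are striped across
 * both; a lower priority area is only tried after the higher priority
 * ones have wrapped or filled up.  highest_priority_index may steer the
 * search back to a high priority area that has just had entries freed.
 */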
 710
 711/* The only caller of this function is now suspend routine */
 712swp_entry_t get_swap_page_of_type(int type)
 713{
 714	struct swap_info_struct *si;
 715	pgoff_t offset;
 716
 717	si = swap_info[type];
 718	spin_lock(&si->lock);
 719	if (si && (si->flags & SWP_WRITEOK)) {
 720		atomic_long_dec(&nr_swap_pages);
 721		/* This is called for allocating swap entry, not cache */
 722		offset = scan_swap_map(si, 1);
 723		if (offset) {
 724			spin_unlock(&si->lock);
 725			return swp_entry(type, offset);
 726		}
 727		atomic_long_inc(&nr_swap_pages);
 728	}
 729	spin_unlock(&si->lock);
 730	return (swp_entry_t) {0};
 731}
 732
 733static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 734{
 735	struct swap_info_struct *p;
 736	unsigned long offset, type;
 737
 738	if (!entry.val)
 739		goto out;
 740	type = swp_type(entry);
 741	if (type >= nr_swapfiles)
 742		goto bad_nofile;
 743	p = swap_info[type];
 744	if (!(p->flags & SWP_USED))
 745		goto bad_device;
 746	offset = swp_offset(entry);
 747	if (offset >= p->max)
 748		goto bad_offset;
 749	if (!p->swap_map[offset])
 750		goto bad_free;
 751	spin_lock(&p->lock);
 752	return p;
 753
 754bad_free:
 755	pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
 756	goto out;
 757bad_offset:
 758	pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
 759	goto out;
 760bad_device:
 761	pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
 762	goto out;
 763bad_nofile:
 764	pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
 765out:
 766	return NULL;
 767}
 768
 769/*
  770 * This swap type has just freed a swap entry; check whether it is the
  771 * highest priority swap type to have done so. get_swap_page() uses
 772 * highest_priority_index to search highest priority swap type. The
 773 * swap_info_struct.lock can't protect us if there are multiple swap types
 774 * active, so we use atomic_cmpxchg.
 775 */
 776static void set_highest_priority_index(int type)
 777{
 778	int old_hp_index, new_hp_index;
 779
 780	do {
 781		old_hp_index = atomic_read(&highest_priority_index);
 782		if (old_hp_index != -1 &&
 783			swap_info[old_hp_index]->prio >= swap_info[type]->prio)
 784			break;
 785		new_hp_index = type;
 786	} while (atomic_cmpxchg(&highest_priority_index,
 787		old_hp_index, new_hp_index) != old_hp_index);
 788}
 789
 790static unsigned char swap_entry_free(struct swap_info_struct *p,
 791				     swp_entry_t entry, unsigned char usage)
 792{
 793	unsigned long offset = swp_offset(entry);
 794	unsigned char count;
 795	unsigned char has_cache;
 796
 797	count = p->swap_map[offset];
 798	has_cache = count & SWAP_HAS_CACHE;
 799	count &= ~SWAP_HAS_CACHE;
 800
 801	if (usage == SWAP_HAS_CACHE) {
 802		VM_BUG_ON(!has_cache);
 803		has_cache = 0;
 804	} else if (count == SWAP_MAP_SHMEM) {
 805		/*
 806		 * Or we could insist on shmem.c using a special
 807		 * swap_shmem_free() and free_shmem_swap_and_cache()...
 808		 */
 809		count = 0;
 810	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 811		if (count == COUNT_CONTINUED) {
 812			if (swap_count_continued(p, offset, count))
 813				count = SWAP_MAP_MAX | COUNT_CONTINUED;
 814			else
 815				count = SWAP_MAP_MAX;
 816		} else
 817			count--;
 818	}
 819
 820	if (!count)
 821		mem_cgroup_uncharge_swap(entry);
 822
 823	usage = count | has_cache;
 824	p->swap_map[offset] = usage;
 825
 826	/* free if no reference */
 827	if (!usage) {
 828		dec_cluster_info_page(p, p->cluster_info, offset);
 829		if (offset < p->lowest_bit)
 830			p->lowest_bit = offset;
 831		if (offset > p->highest_bit)
 832			p->highest_bit = offset;
 833		set_highest_priority_index(p->type);
 834		atomic_long_inc(&nr_swap_pages);
 835		p->inuse_pages--;
 836		frontswap_invalidate_page(p->type, offset);
 837		if (p->flags & SWP_BLKDEV) {
 838			struct gendisk *disk = p->bdev->bd_disk;
 839			if (disk->fops->swap_slot_free_notify)
 840				disk->fops->swap_slot_free_notify(p->bdev,
 841								  offset);
 842		}
 843	}
 844
 845	return usage;
 846}
 847
 848/*
 849 * Caller has made sure that the swap device corresponding to entry
 850 * is still around or has not been recycled.
 851 */
 852void swap_free(swp_entry_t entry)
 853{
 854	struct swap_info_struct *p;
 855
 856	p = swap_info_get(entry);
 857	if (p) {
 858		swap_entry_free(p, entry, 1);
 859		spin_unlock(&p->lock);
 860	}
 861}
 862
 863/*
 864 * Called after dropping swapcache to decrease refcnt to swap entries.
 865 */
 866void swapcache_free(swp_entry_t entry, struct page *page)
 867{
 868	struct swap_info_struct *p;
 869	unsigned char count;
 870
 871	p = swap_info_get(entry);
 872	if (p) {
 873		count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
 874		if (page)
 875			mem_cgroup_uncharge_swapcache(page, entry, count != 0);
 876		spin_unlock(&p->lock);
 877	}
 878}
 879
 880/*
 881 * How many references to page are currently swapped out?
 882 * This does not give an exact answer when swap count is continued,
 883 * but does include the high COUNT_CONTINUED flag to allow for that.
 884 */
 885int page_swapcount(struct page *page)
 886{
 887	int count = 0;
 888	struct swap_info_struct *p;
 889	swp_entry_t entry;
 890
 891	entry.val = page_private(page);
 892	p = swap_info_get(entry);
 893	if (p) {
 894		count = swap_count(p->swap_map[swp_offset(entry)]);
 895		spin_unlock(&p->lock);
 896	}
 897	return count;
 898}
 899
 900/*
 901 * We can write to an anon page without COW if there are no other references
 902 * to it.  And as a side-effect, free up its swap: because the old content
 903 * on disk will never be read, and seeking back there to write new content
 904 * later would only waste time away from clustering.
 905 */
 906int reuse_swap_page(struct page *page)
 907{
 908	int count;
 909
 910	VM_BUG_ON_PAGE(!PageLocked(page), page);
 911	if (unlikely(PageKsm(page)))
 912		return 0;
 913	count = page_mapcount(page);
 914	if (count <= 1 && PageSwapCache(page)) {
 915		count += page_swapcount(page);
 916		if (count == 1 && !PageWriteback(page)) {
 917			delete_from_swap_cache(page);
 918			SetPageDirty(page);
 919		}
 920	}
 921	return count <= 1;
 922}
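/*
 * Example of the test above: an anonymous page mapped by a single pte,
 * still in the swap cache but with no remaining swap references, has
 * page_mapcount() == 1 and page_swapcount() == 0.  The total of 1 lets
 * the write fault reuse the page without copying, and its now stale
 * swap slot and swapcache entry are dropped eagerly.
 */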
 923
 924/*
 925 * If swap is getting full, or if there are no more mappings of this page,
 926 * then try_to_free_swap is called to free its swap space.
 927 */
 928int try_to_free_swap(struct page *page)
 929{
 930	VM_BUG_ON_PAGE(!PageLocked(page), page);
 931
 932	if (!PageSwapCache(page))
 933		return 0;
 934	if (PageWriteback(page))
 935		return 0;
 936	if (page_swapcount(page))
 937		return 0;
 938
 939	/*
 940	 * Once hibernation has begun to create its image of memory,
 941	 * there's a danger that one of the calls to try_to_free_swap()
 942	 * - most probably a call from __try_to_reclaim_swap() while
 943	 * hibernation is allocating its own swap pages for the image,
 944	 * but conceivably even a call from memory reclaim - will free
 945	 * the swap from a page which has already been recorded in the
 946	 * image as a clean swapcache page, and then reuse its swap for
 947	 * another page of the image.  On waking from hibernation, the
 948	 * original page might be freed under memory pressure, then
 949	 * later read back in from swap, now with the wrong data.
 950	 *
 951	 * Hibernation suspends storage while it is writing the image
 952	 * to disk so check that here.
 953	 */
 954	if (pm_suspended_storage())
 955		return 0;
 956
 957	delete_from_swap_cache(page);
 958	SetPageDirty(page);
 959	return 1;
 960}
 961
 962/*
 963 * Free the swap entry like above, but also try to
 964 * free the page cache entry if it is the last user.
 965 */
 966int free_swap_and_cache(swp_entry_t entry)
 967{
 968	struct swap_info_struct *p;
 969	struct page *page = NULL;
 970
 971	if (non_swap_entry(entry))
 972		return 1;
 973
 974	p = swap_info_get(entry);
 975	if (p) {
 976		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
 977			page = find_get_page(swap_address_space(entry),
 978						entry.val);
 979			if (page && !trylock_page(page)) {
 980				page_cache_release(page);
 981				page = NULL;
 982			}
 983		}
 984		spin_unlock(&p->lock);
 985	}
 986	if (page) {
 987		/*
 988		 * Not mapped elsewhere, or swap space full? Free it!
 989		 * Also recheck PageSwapCache now page is locked (above).
 990		 */
 991		if (PageSwapCache(page) && !PageWriteback(page) &&
 992				(!page_mapped(page) || vm_swap_full())) {
 993			delete_from_swap_cache(page);
 994			SetPageDirty(page);
 995		}
 996		unlock_page(page);
 997		page_cache_release(page);
 998	}
 999	return p != NULL;
1000}
1001
1002#ifdef CONFIG_HIBERNATION
1003/*
1004 * Find the swap type that corresponds to given device (if any).
1005 *
1006 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1007 * from 0, in which the swap header is expected to be located.
1008 *
1009 * This is needed for the suspend to disk (aka swsusp).
1010 */
1011int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1012{
1013	struct block_device *bdev = NULL;
1014	int type;
1015
1016	if (device)
1017		bdev = bdget(device);
1018
1019	spin_lock(&swap_lock);
1020	for (type = 0; type < nr_swapfiles; type++) {
1021		struct swap_info_struct *sis = swap_info[type];
1022
1023		if (!(sis->flags & SWP_WRITEOK))
1024			continue;
1025
1026		if (!bdev) {
1027			if (bdev_p)
1028				*bdev_p = bdgrab(sis->bdev);
1029
1030			spin_unlock(&swap_lock);
1031			return type;
1032		}
1033		if (bdev == sis->bdev) {
1034			struct swap_extent *se = &sis->first_swap_extent;
1035
1036			if (se->start_block == offset) {
1037				if (bdev_p)
1038					*bdev_p = bdgrab(sis->bdev);
1039
1040				spin_unlock(&swap_lock);
1041				bdput(bdev);
1042				return type;
1043			}
1044		}
1045	}
1046	spin_unlock(&swap_lock);
1047	if (bdev)
1048		bdput(bdev);
1049
1050	return -ENODEV;
1051}
1052
1053/*
1054 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1055 * corresponding to given index in swap_info (swap type).
1056 */
1057sector_t swapdev_block(int type, pgoff_t offset)
1058{
1059	struct block_device *bdev;
1060
1061	if ((unsigned int)type >= nr_swapfiles)
1062		return 0;
1063	if (!(swap_info[type]->flags & SWP_WRITEOK))
1064		return 0;
1065	return map_swap_entry(swp_entry(type, offset), &bdev);
1066}
1067
1068/*
1069 * Return either the total number of swap pages of given type, or the number
1070 * of free pages of that type (depending on @free)
1071 *
1072 * This is needed for software suspend
1073 */
1074unsigned int count_swap_pages(int type, int free)
1075{
1076	unsigned int n = 0;
1077
1078	spin_lock(&swap_lock);
1079	if ((unsigned int)type < nr_swapfiles) {
1080		struct swap_info_struct *sis = swap_info[type];
1081
1082		spin_lock(&sis->lock);
1083		if (sis->flags & SWP_WRITEOK) {
1084			n = sis->pages;
1085			if (free)
1086				n -= sis->inuse_pages;
1087		}
1088		spin_unlock(&sis->lock);
1089	}
1090	spin_unlock(&swap_lock);
1091	return n;
1092}
1093#endif /* CONFIG_HIBERNATION */
1094
1095static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
1096{
1097#ifdef CONFIG_MEM_SOFT_DIRTY
1098	/*
1099	 * When pte keeps soft dirty bit the pte generated
1100	 * from swap entry does not has it, still it's same
1101	 * pte from logical point of view.
1102	 */
1103	pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
1104	return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
1105#else
1106	return pte_same(pte, swp_pte);
1107#endif
1108}
1109
1110/*
1111 * No need to decide whether this PTE shares the swap entry with others,
1112 * just let do_wp_page work it out if a write is requested later - to
1113 * force COW, vm_page_prot omits write permission from any private vma.
1114 */
1115static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1116		unsigned long addr, swp_entry_t entry, struct page *page)
1117{
1118	struct page *swapcache;
1119	struct mem_cgroup *memcg;
1120	spinlock_t *ptl;
1121	pte_t *pte;
1122	int ret = 1;
1123
1124	swapcache = page;
1125	page = ksm_might_need_to_copy(page, vma, addr);
1126	if (unlikely(!page))
1127		return -ENOMEM;
1128
1129	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
1130					 GFP_KERNEL, &memcg)) {
1131		ret = -ENOMEM;
1132		goto out_nolock;
1133	}
1134
1135	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1136	if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
1137		mem_cgroup_cancel_charge_swapin(memcg);
1138		ret = 0;
1139		goto out;
1140	}
1141
1142	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1143	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1144	get_page(page);
1145	set_pte_at(vma->vm_mm, addr, pte,
1146		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1147	if (page == swapcache)
1148		page_add_anon_rmap(page, vma, addr);
1149	else /* ksm created a completely new copy */
1150		page_add_new_anon_rmap(page, vma, addr);
1151	mem_cgroup_commit_charge_swapin(page, memcg);
1152	swap_free(entry);
1153	/*
1154	 * Move the page to the active list so it is not
1155	 * immediately swapped out again after swapon.
1156	 */
1157	activate_page(page);
1158out:
1159	pte_unmap_unlock(pte, ptl);
1160out_nolock:
1161	if (page != swapcache) {
1162		unlock_page(page);
1163		put_page(page);
1164	}
1165	return ret;
1166}
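
/*
 * Summary (editorial): unuse_pte() swaps one page back in "by hand" for
 * swapoff: it charges the page to the memcg, rechecks the pte under the pte
 * lock, replaces the swap pte with a present mapping of @page, adjusts the
 * MM_SWAPENTS/MM_ANONPAGES counters, adds the anon rmap, frees the swap
 * entry and activates the page so it is not immediately reclaimed again.
 */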
1167
1168static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1169				unsigned long addr, unsigned long end,
1170				swp_entry_t entry, struct page *page)
1171{
1172	pte_t swp_pte = swp_entry_to_pte(entry);
1173	pte_t *pte;
1174	int ret = 0;
1175
1176	/*
1177	 * We don't actually need pte lock while scanning for swp_pte: since
1178	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
1179	 * page table while we're scanning; though it could get zapped, and on
1180	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
1181	 * of unmatched parts which look like swp_pte, so unuse_pte must
1182	 * recheck under pte lock.  Scanning without pte lock lets it be
1183	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
1184	 */
1185	pte = pte_offset_map(pmd, addr);
1186	do {
1187		/*
1188		 * swapoff spends a _lot_ of time in this loop!
1189		 * Test inline before going to call unuse_pte.
1190		 */
1191		if (unlikely(maybe_same_pte(*pte, swp_pte))) {
1192			pte_unmap(pte);
1193			ret = unuse_pte(vma, pmd, addr, entry, page);
1194			if (ret)
1195				goto out;
1196			pte = pte_offset_map(pmd, addr);
1197		}
1198	} while (pte++, addr += PAGE_SIZE, addr != end);
1199	pte_unmap(pte - 1);
1200out:
1201	return ret;
1202}
1203
1204static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1205				unsigned long addr, unsigned long end,
1206				swp_entry_t entry, struct page *page)
1207{
1208	pmd_t *pmd;
1209	unsigned long next;
1210	int ret;
1211
1212	pmd = pmd_offset(pud, addr);
1213	do {
1214		next = pmd_addr_end(addr, end);
1215		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1216			continue;
1217		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
1218		if (ret)
1219			return ret;
1220	} while (pmd++, addr = next, addr != end);
1221	return 0;
1222}
1223
1224static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
1225				unsigned long addr, unsigned long end,
1226				swp_entry_t entry, struct page *page)
1227{
1228	pud_t *pud;
1229	unsigned long next;
1230	int ret;
1231
1232	pud = pud_offset(pgd, addr);
1233	do {
1234		next = pud_addr_end(addr, end);
1235		if (pud_none_or_clear_bad(pud))
1236			continue;
1237		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
1238		if (ret)
1239			return ret;
1240	} while (pud++, addr = next, addr != end);
1241	return 0;
1242}
1243
1244static int unuse_vma(struct vm_area_struct *vma,
1245				swp_entry_t entry, struct page *page)
1246{
1247	pgd_t *pgd;
1248	unsigned long addr, end, next;
1249	int ret;
1250
1251	if (page_anon_vma(page)) {
1252		addr = page_address_in_vma(page, vma);
1253		if (addr == -EFAULT)
1254			return 0;
1255		else
1256			end = addr + PAGE_SIZE;
1257	} else {
1258		addr = vma->vm_start;
1259		end = vma->vm_end;
1260	}
1261
1262	pgd = pgd_offset(vma->vm_mm, addr);
1263	do {
1264		next = pgd_addr_end(addr, end);
1265		if (pgd_none_or_clear_bad(pgd))
1266			continue;
1267		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
1268		if (ret)
1269			return ret;
1270	} while (pgd++, addr = next, addr != end);
1271	return 0;
1272}
1273
1274static int unuse_mm(struct mm_struct *mm,
1275				swp_entry_t entry, struct page *page)
1276{
1277	struct vm_area_struct *vma;
1278	int ret = 0;
1279
1280	if (!down_read_trylock(&mm->mmap_sem)) {
1281		/*
1282		 * Activate page so shrink_inactive_list is unlikely to unmap
1283		 * its ptes while lock is dropped, so swapoff can make progress.
1284		 */
1285		activate_page(page);
1286		unlock_page(page);
1287		down_read(&mm->mmap_sem);
1288		lock_page(page);
1289	}
1290	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1291		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1292			break;
1293	}
1294	up_read(&mm->mmap_sem);
1295	return (ret < 0)? ret: 0;
1296}
1297
1298/*
1299 * Scan swap_map (or frontswap_map if frontswap parameter is true)
1300 * from current position to next entry still in use.
1301 * Recycle to start on reaching the end, returning 0 when empty.
1302 */
1303static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1304					unsigned int prev, bool frontswap)
1305{
1306	unsigned int max = si->max;
1307	unsigned int i = prev;
1308	unsigned char count;
1309
1310	/*
1311	 * No need for swap_lock here: we're just looking
1312	 * for whether an entry is in use, not modifying it; false
1313	 * hits are okay, and sys_swapoff() has already prevented new
1314	 * allocations from this area (while holding swap_lock).
1315	 */
1316	for (;;) {
1317		if (++i >= max) {
1318			if (!prev) {
1319				i = 0;
1320				break;
1321			}
1322			/*
1323			 * No entries in use at top of swap_map,
1324			 * loop back to start and recheck there.
1325			 */
1326			max = prev + 1;
1327			prev = 0;
1328			i = 1;
1329		}
1330		if (frontswap) {
1331			if (frontswap_test(si, i))
1332				break;
1333			else
1334				continue;
1335		}
1336		count = ACCESS_ONCE(si->swap_map[i]);
1337		if (count && swap_count(count) != SWAP_MAP_BAD)
1338			break;
1339	}
1340	return i;
1341}
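
/*
 * Worked example (editorial): with si->max == 100 and prev == 57 the scan
 * checks offsets 58..99; if none of those is still in use it wraps and
 * rechecks 1..57 (the header slot 0 is never returned), finally returning 0
 * once nothing is found in use, which ends the try_to_unuse() loop below.
 */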
1342
1343/*
1344 * We completely avoid races by reading each swap page in advance,
1345 * and then search for the process using it.  All the necessary
1346 * page table adjustments can then be made atomically.
1347 *
1348 * If the boolean frontswap is true, only unuse pages_to_unuse pages;
1349 * pages_to_unuse == 0 means all pages; this is ignored if frontswap is false.
1350 */
1351int try_to_unuse(unsigned int type, bool frontswap,
1352		 unsigned long pages_to_unuse)
1353{
1354	struct swap_info_struct *si = swap_info[type];
1355	struct mm_struct *start_mm;
1356	volatile unsigned char *swap_map; /* swap_map is accessed without
1357					   * locking. Mark it as volatile
1358					   * to prevent compiler doing
1359					   * something odd.
1360					   */
1361	unsigned char swcount;
1362	struct page *page;
1363	swp_entry_t entry;
1364	unsigned int i = 0;
1365	int retval = 0;
1366
1367	/*
1368	 * When searching mms for an entry, a good strategy is to
1369	 * start at the first mm we freed the previous entry from
1370	 * (though actually we don't notice whether we or coincidence
1371	 * freed the entry).  Initialize this start_mm with a hold.
1372	 *
1373	 * A simpler strategy would be to start at the last mm we
1374	 * freed the previous entry from; but that would take less
1375	 * advantage of mmlist ordering, which clusters forked mms
1376	 * together, child after parent.  If we race with dup_mmap(), we
1377	 * prefer to resolve parent before child, lest we miss entries
1378	 * duplicated after we scanned child: using last mm would invert
1379	 * that.
1380	 */
1381	start_mm = &init_mm;
1382	atomic_inc(&init_mm.mm_users);
1383
1384	/*
1385	 * Keep on scanning until all entries have gone.  Usually,
1386	 * one pass through swap_map is enough, but not necessarily:
1387	 * there are races when an instance of an entry might be missed.
1388	 */
1389	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1390		if (signal_pending(current)) {
1391			retval = -EINTR;
1392			break;
1393		}
1394
1395		/*
1396		 * Get a page for the entry, using the existing swap
1397		 * cache page if there is one.  Otherwise, get a clean
1398		 * page and read the swap into it.
1399		 */
1400		swap_map = &si->swap_map[i];
1401		entry = swp_entry(type, i);
1402		page = read_swap_cache_async(entry,
1403					GFP_HIGHUSER_MOVABLE, NULL, 0);
1404		if (!page) {
1405			/*
1406			 * Either swap_duplicate() failed because entry
1407			 * has been freed independently, and will not be
1408			 * reused since sys_swapoff() already disabled
1409			 * allocation from here, or alloc_page() failed.
1410			 */
1411			swcount = *swap_map;
1412			/*
1413			 * We don't hold the lock here, so the swap entry could be
1414			 * SWAP_MAP_BAD (when the cluster is being discarded).
1415			 * Instead of failing out, we can just skip the swap
1416			 * entry, because swapoff will wait for the discard to
1417			 * finish anyway.
1418			 */
1419			if (!swcount || swcount == SWAP_MAP_BAD)
1420				continue;
1421			retval = -ENOMEM;
1422			break;
1423		}
1424
1425		/*
1426		 * Don't hold on to start_mm if it looks like exiting.
1427		 */
1428		if (atomic_read(&start_mm->mm_users) == 1) {
1429			mmput(start_mm);
1430			start_mm = &init_mm;
1431			atomic_inc(&init_mm.mm_users);
1432		}
1433
1434		/*
1435		 * Wait for and lock page.  When do_swap_page races with
1436		 * try_to_unuse, do_swap_page can handle the fault much
1437		 * faster than try_to_unuse can locate the entry.  This
1438		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1439		 * defer to do_swap_page in such a case - in some tests,
1440		 * do_swap_page and try_to_unuse repeatedly compete.
1441		 */
1442		wait_on_page_locked(page);
1443		wait_on_page_writeback(page);
1444		lock_page(page);
1445		wait_on_page_writeback(page);
1446
1447		/*
1448		 * Remove all references to entry.
1449		 */
1450		swcount = *swap_map;
1451		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1452			retval = shmem_unuse(entry, page);
1453			/* page has already been unlocked and released */
1454			if (retval < 0)
1455				break;
1456			continue;
1457		}
1458		if (swap_count(swcount) && start_mm != &init_mm)
1459			retval = unuse_mm(start_mm, entry, page);
1460
1461		if (swap_count(*swap_map)) {
1462			int set_start_mm = (*swap_map >= swcount);
1463			struct list_head *p = &start_mm->mmlist;
1464			struct mm_struct *new_start_mm = start_mm;
1465			struct mm_struct *prev_mm = start_mm;
1466			struct mm_struct *mm;
1467
1468			atomic_inc(&new_start_mm->mm_users);
1469			atomic_inc(&prev_mm->mm_users);
1470			spin_lock(&mmlist_lock);
1471			while (swap_count(*swap_map) && !retval &&
1472					(p = p->next) != &start_mm->mmlist) {
1473				mm = list_entry(p, struct mm_struct, mmlist);
1474				if (!atomic_inc_not_zero(&mm->mm_users))
1475					continue;
1476				spin_unlock(&mmlist_lock);
1477				mmput(prev_mm);
1478				prev_mm = mm;
1479
1480				cond_resched();
1481
1482				swcount = *swap_map;
1483				if (!swap_count(swcount)) /* any usage ? */
1484					;
1485				else if (mm == &init_mm)
1486					set_start_mm = 1;
1487				else
1488					retval = unuse_mm(mm, entry, page);
1489
1490				if (set_start_mm && *swap_map < swcount) {
1491					mmput(new_start_mm);
1492					atomic_inc(&mm->mm_users);
1493					new_start_mm = mm;
1494					set_start_mm = 0;
1495				}
1496				spin_lock(&mmlist_lock);
1497			}
1498			spin_unlock(&mmlist_lock);
1499			mmput(prev_mm);
1500			mmput(start_mm);
1501			start_mm = new_start_mm;
1502		}
1503		if (retval) {
1504			unlock_page(page);
1505			page_cache_release(page);
1506			break;
1507		}
1508
1509		/*
1510		 * If a reference remains (rare), we would like to leave
1511		 * the page in the swap cache; but try_to_unmap could
1512		 * then re-duplicate the entry once we drop page lock,
1513		 * so we might loop indefinitely; also, that page could
1514		 * not be swapped out to other storage meanwhile.  So:
1515		 * delete from cache even if there's another reference,
1516		 * after ensuring that the data has been saved to disk -
1517		 * since if the reference remains (rarer), it will be
1518		 * read from disk into another page.  Splitting into two
1519		 * pages would be incorrect if swap supported "shared
1520		 * private" pages, but they are handled by tmpfs files.
1521		 *
1522		 * Given how unuse_vma() targets one particular offset
1523		 * in an anon_vma, once the anon_vma has been determined,
1524		 * this splitting happens to be just what is needed to
1525		 * handle where KSM pages have been swapped out: re-reading
1526		 * is unnecessarily slow, but we can fix that later on.
1527		 */
1528		if (swap_count(*swap_map) &&
1529		     PageDirty(page) && PageSwapCache(page)) {
1530			struct writeback_control wbc = {
1531				.sync_mode = WB_SYNC_NONE,
1532			};
1533
1534			swap_writepage(page, &wbc);
1535			lock_page(page);
1536			wait_on_page_writeback(page);
1537		}
1538
1539		/*
1540		 * It is conceivable that a racing task removed this page from
1541		 * swap cache just before we acquired the page lock at the top,
1542		 * or while we dropped it in unuse_mm().  The page might even
1543		 * be back in swap cache on another swap area: that we must not
1544		 * delete, since it may not have been written out to swap yet.
1545		 */
1546		if (PageSwapCache(page) &&
1547		    likely(page_private(page) == entry.val))
1548			delete_from_swap_cache(page);
1549
1550		/*
1551		 * So that we could skip searching mms once the swap count
1552		 * went to 1, we did not mark any present ptes as dirty: we
1553		 * must mark the page dirty so shrink_page_list will preserve it.
1554		 */
1555		SetPageDirty(page);
1556		unlock_page(page);
1557		page_cache_release(page);
1558
1559		/*
1560		 * Make sure that we aren't completely killing
1561		 * interactive performance.
1562		 */
1563		cond_resched();
1564		if (frontswap && pages_to_unuse > 0) {
1565			if (!--pages_to_unuse)
1566				break;
1567		}
1568	}
1569
1570	mmput(start_mm);
1571	return retval;
1572}
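
/*
 * High-level sketch (editorial): try_to_unuse() repeatedly picks the next
 * in-use offset, reads that slot into the swap cache, then walks the mms on
 * init_mm.mmlist (starting from the mm which freed the previous entry) so
 * that unuse_vma()/unuse_pte() convert every matching swap pte back into a
 * present pte.  If a reference still remains the dirty page is written out
 * first; the page is then deleted from the swap cache and marked dirty so
 * that reclaim preserves it.
 */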
1573
1574/*
1575 * After a successful try_to_unuse, if no swap is now in use, we know
1576 * we can empty the mmlist.  swap_lock must be held on entry and exit.
1577 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1578 * added to the mmlist just after page_duplicate - before would be racy.
1579 */
1580static void drain_mmlist(void)
1581{
1582	struct list_head *p, *next;
1583	unsigned int type;
1584
1585	for (type = 0; type < nr_swapfiles; type++)
1586		if (swap_info[type]->inuse_pages)
1587			return;
1588	spin_lock(&mmlist_lock);
1589	list_for_each_safe(p, next, &init_mm.mmlist)
1590		list_del_init(p);
1591	spin_unlock(&mmlist_lock);
1592}
1593
1594/*
1595 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1596 * corresponds to the page offset of the specified swap entry.
1597 * Note that the return type is sector_t, but the value is a page offset
1598 * into the bdev, not a sector offset.
1599 */
1600static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1601{
1602	struct swap_info_struct *sis;
1603	struct swap_extent *start_se;
1604	struct swap_extent *se;
1605	pgoff_t offset;
1606
1607	sis = swap_info[swp_type(entry)];
1608	*bdev = sis->bdev;
1609
1610	offset = swp_offset(entry);
1611	start_se = sis->curr_swap_extent;
1612	se = start_se;
1613
1614	for ( ; ; ) {
1615		struct list_head *lh;
1616
1617		if (se->start_page <= offset &&
1618				offset < (se->start_page + se->nr_pages)) {
1619			return se->start_block + (offset - se->start_page);
1620		}
1621		lh = se->list.next;
1622		se = list_entry(lh, struct swap_extent, list);
1623		sis->curr_swap_extent = se;
1624		BUG_ON(se == start_se);		/* It *must* be present */
1625	}
1626}
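
/*
 * Worked example (editorial): given an extent
 *	{ start_page = 1024, nr_pages = 2048, start_block = 8192 }
 * a swap offset of 1060 falls inside it (1024 <= 1060 < 3072) and maps to
 * block 8192 + (1060 - 1024) = 8228.  The matching extent is cached in
 * sis->curr_swap_extent, so the next lookup usually succeeds immediately.
 */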
1627
1628/*
1629 * Returns the page offset into bdev for the specified page's swap entry.
1630 */
1631sector_t map_swap_page(struct page *page, struct block_device **bdev)
1632{
1633	swp_entry_t entry;
1634	entry.val = page_private(page);
1635	return map_swap_entry(entry, bdev);
1636}
1637
1638/*
1639 * Free all of a swapdev's extent information
1640 */
1641static void destroy_swap_extents(struct swap_info_struct *sis)
1642{
1643	while (!list_empty(&sis->first_swap_extent.list)) {
1644		struct swap_extent *se;
1645
1646		se = list_entry(sis->first_swap_extent.list.next,
1647				struct swap_extent, list);
1648		list_del(&se->list);
1649		kfree(se);
1650	}
1651
1652	if (sis->flags & SWP_FILE) {
1653		struct file *swap_file = sis->swap_file;
1654		struct address_space *mapping = swap_file->f_mapping;
1655
1656		sis->flags &= ~SWP_FILE;
1657		mapping->a_ops->swap_deactivate(swap_file);
1658	}
1659}
1660
1661/*
1662 * Add a block range (and the corresponding page range) into this swapdev's
1663 * extent list.  The extent list is kept sorted in page order.
1664 *
1665 * This function rather assumes that it is called in ascending page order.
1666 */
1667int
1668add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1669		unsigned long nr_pages, sector_t start_block)
1670{
1671	struct swap_extent *se;
1672	struct swap_extent *new_se;
1673	struct list_head *lh;
1674
1675	if (start_page == 0) {
1676		se = &sis->first_swap_extent;
1677		sis->curr_swap_extent = se;
1678		se->start_page = 0;
1679		se->nr_pages = nr_pages;
1680		se->start_block = start_block;
1681		return 1;
1682	} else {
1683		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
1684		se = list_entry(lh, struct swap_extent, list);
1685		BUG_ON(se->start_page + se->nr_pages != start_page);
1686		if (se->start_block + se->nr_pages == start_block) {
1687			/* Merge it */
1688			se->nr_pages += nr_pages;
1689			return 0;
1690		}
1691	}
1692
1693	/*
1694	 * No merge.  Insert a new extent, preserving ordering.
1695	 */
1696	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1697	if (new_se == NULL)
1698		return -ENOMEM;
1699	new_se->start_page = start_page;
1700	new_se->nr_pages = nr_pages;
1701	new_se->start_block = start_block;
1702
1703	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1704	return 1;
1705}
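
/*
 * Merge example (editorial): if the highest extent so far is
 *	{ start_page = 0, nr_pages = 1024, start_block = 8192 }
 * then add_swap_extent(sis, 1024, 512, 9216) just grows it to
 * nr_pages = 1536, since 8192 + 1024 == 9216; a non-contiguous range would
 * instead be appended as a new extent at the tail of the list.
 */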
1706
1707/*
1708 * A `swap extent' is a simple thing which maps a contiguous range of pages
1709 * onto a contiguous range of disk blocks.  An ordered list of swap extents
1710 * is built at swapon time and is then used at swap_writepage/swap_readpage
1711 * time for locating where on disk a page belongs.
1712 *
1713 * If the swapfile is an S_ISBLK block device, a single extent is installed.
1714 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1715 * swap files identically.
1716 *
1717 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1718 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1719 * swapfiles are handled *identically* after swapon time.
1720 *
1721 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1722 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1723 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1724 * requirements, they are simply tossed out - we will never use those blocks
1725 * for swapping.
1726 *
1727 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1728 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1729 * which will scribble on the fs.
1730 *
1731 * The amount of disk space which a single swap extent represents varies.
1732 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1733 * extents in the list.  To avoid much list walking, we cache the previous
1734 * search location in `curr_swap_extent', and start new searches from there.
1735 * This is extremely effective.  The average number of iterations in
1736 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1737 */
1738static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1739{
1740	struct file *swap_file = sis->swap_file;
1741	struct address_space *mapping = swap_file->f_mapping;
1742	struct inode *inode = mapping->host;
1743	int ret;
1744
1745	if (S_ISBLK(inode->i_mode)) {
1746		ret = add_swap_extent(sis, 0, sis->max, 0);
1747		*span = sis->pages;
1748		return ret;
1749	}
1750
1751	if (mapping->a_ops->swap_activate) {
1752		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
1753		if (!ret) {
1754			sis->flags |= SWP_FILE;
1755			ret = add_swap_extent(sis, 0, sis->max, 0);
1756			*span = sis->pages;
1757		}
1758		return ret;
1759	}
1760
1761	return generic_swapfile_activate(sis, swap_file, span);
1762}
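
/*
 * Editorial note: setup_swap_extents() has three paths - an S_ISBLK device
 * gets a single extent covering the whole area, a filesystem providing
 * ->swap_activate() is flagged SWP_FILE and likewise gets one extent, and
 * everything else falls back to generic_swapfile_activate(), which builds
 * the ordered extent list from the file's on-disk blocks in PAGE_SIZE
 * chunks as described in the comment above.
 */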
1763
1764static void _enable_swap_info(struct swap_info_struct *p, int prio,
1765				unsigned char *swap_map,
1766				struct swap_cluster_info *cluster_info)
1767{
1768	int i, prev;
1769
1770	if (prio >= 0)
1771		p->prio = prio;
1772	else
1773		p->prio = --least_priority;
1774	p->swap_map = swap_map;
1775	p->cluster_info = cluster_info;
1776	p->flags |= SWP_WRITEOK;
1777	atomic_long_add(p->pages, &nr_swap_pages);
1778	total_swap_pages += p->pages;
1779
1780	/* insert swap space into swap_list: */
1781	prev = -1;
1782	for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1783		if (p->prio >= swap_info[i]->prio)
1784			break;
1785		prev = i;
1786	}
1787	p->next = i;
1788	if (prev < 0)
1789		swap_list.head = swap_list.next = p->type;
1790	else
1791		swap_info[prev]->next = p->type;
1792}
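
/*
 * Example (editorial): swap_list is kept sorted by descending priority, so
 * with existing areas of priority 5 and -2, enabling a new area with
 * prio == 0 inserts it between them; enabling without SWAP_FLAG_PREFER
 * assigns --least_priority, which typically places the new area after
 * every existing one.
 */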
1793
1794static void enable_swap_info(struct swap_info_struct *p, int prio,
1795				unsigned char *swap_map,
1796				struct swap_cluster_info *cluster_info,
1797				unsigned long *frontswap_map)
1798{
1799	frontswap_init(p->type, frontswap_map);
1800	spin_lock(&swap_lock);
1801	spin_lock(&p->lock);
1802	 _enable_swap_info(p, prio, swap_map, cluster_info);
1803	spin_unlock(&p->lock);
1804	spin_unlock(&swap_lock);
1805}
1806
1807static void reinsert_swap_info(struct swap_info_struct *p)
1808{
1809	spin_lock(&swap_lock);
1810	spin_lock(&p->lock);
1811	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
1812	spin_unlock(&p->lock);
1813	spin_unlock(&swap_lock);
1814}
1815
1816SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1817{
1818	struct swap_info_struct *p = NULL;
1819	unsigned char *swap_map;
1820	struct swap_cluster_info *cluster_info;
1821	unsigned long *frontswap_map;
1822	struct file *swap_file, *victim;
1823	struct address_space *mapping;
1824	struct inode *inode;
1825	struct filename *pathname;
1826	int i, type, prev;
1827	int err;
1828	unsigned int old_block_size;
1829
1830	if (!capable(CAP_SYS_ADMIN))
1831		return -EPERM;
1832
1833	BUG_ON(!current->mm);
1834
1835	pathname = getname(specialfile);
1836	if (IS_ERR(pathname))
1837		return PTR_ERR(pathname);
1838
1839	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
1840	err = PTR_ERR(victim);
1841	if (IS_ERR(victim))
1842		goto out;
1843
1844	mapping = victim->f_mapping;
1845	prev = -1;
1846	spin_lock(&swap_lock);
1847	for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1848		p = swap_info[type];
1849		if (p->flags & SWP_WRITEOK) {
1850			if (p->swap_file->f_mapping == mapping)
1851				break;
1852		}
1853		prev = type;
1854	}
1855	if (type < 0) {
1856		err = -EINVAL;
1857		spin_unlock(&swap_lock);
1858		goto out_dput;
1859	}
1860	if (!security_vm_enough_memory_mm(current->mm, p->pages))
1861		vm_unacct_memory(p->pages);
1862	else {
1863		err = -ENOMEM;
1864		spin_unlock(&swap_lock);
1865		goto out_dput;
1866	}
1867	if (prev < 0)
1868		swap_list.head = p->next;
1869	else
1870		swap_info[prev]->next = p->next;
1871	if (type == swap_list.next) {
1872		/* just pick something that's safe... */
1873		swap_list.next = swap_list.head;
1874	}
1875	spin_lock(&p->lock);
1876	if (p->prio < 0) {
1877		for (i = p->next; i >= 0; i = swap_info[i]->next)
1878			swap_info[i]->prio = p->prio--;
1879		least_priority++;
1880	}
1881	atomic_long_sub(p->pages, &nr_swap_pages);
1882	total_swap_pages -= p->pages;
1883	p->flags &= ~SWP_WRITEOK;
1884	spin_unlock(&p->lock);
1885	spin_unlock(&swap_lock);
1886
1887	set_current_oom_origin();
1888	err = try_to_unuse(type, false, 0); /* force all pages to be unused */
1889	clear_current_oom_origin();
1890
1891	if (err) {
1892		/* re-insert swap space back into swap_list */
1893		reinsert_swap_info(p);
1894		goto out_dput;
1895	}
1896
1897	flush_work(&p->discard_work);
1898
1899	destroy_swap_extents(p);
1900	if (p->flags & SWP_CONTINUED)
1901		free_swap_count_continuations(p);
1902
1903	mutex_lock(&swapon_mutex);
1904	spin_lock(&swap_lock);
1905	spin_lock(&p->lock);
1906	drain_mmlist();
1907
1908	/* wait for anyone still in scan_swap_map */
1909	p->highest_bit = 0;		/* cuts scans short */
1910	while (p->flags >= SWP_SCANNING) {
1911		spin_unlock(&p->lock);
1912		spin_unlock(&swap_lock);
1913		schedule_timeout_uninterruptible(1);
1914		spin_lock(&swap_lock);
1915		spin_lock(&p->lock);
1916	}
1917
1918	swap_file = p->swap_file;
1919	old_block_size = p->old_block_size;
1920	p->swap_file = NULL;
1921	p->max = 0;
1922	swap_map = p->swap_map;
1923	p->swap_map = NULL;
1924	cluster_info = p->cluster_info;
1925	p->cluster_info = NULL;
1926	frontswap_map = frontswap_map_get(p);
1927	spin_unlock(&p->lock);
1928	spin_unlock(&swap_lock);
1929	frontswap_invalidate_area(type);
1930	frontswap_map_set(p, NULL);
1931	mutex_unlock(&swapon_mutex);
1932	free_percpu(p->percpu_cluster);
1933	p->percpu_cluster = NULL;
1934	vfree(swap_map);
1935	vfree(cluster_info);
1936	vfree(frontswap_map);
1937	/* Destroy swap account information */
1938	swap_cgroup_swapoff(type);
1939
1940	inode = mapping->host;
1941	if (S_ISBLK(inode->i_mode)) {
1942		struct block_device *bdev = I_BDEV(inode);
1943		set_blocksize(bdev, old_block_size);
1944		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1945	} else {
1946		mutex_lock(&inode->i_mutex);
1947		inode->i_flags &= ~S_SWAPFILE;
1948		mutex_unlock(&inode->i_mutex);
1949	}
1950	filp_close(swap_file, NULL);
1951
1952	/*
1953	 * Clear the SWP_USED flag after all resources are freed so that swapon
1954	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
1955	 * not hold p->lock after we cleared its SWP_WRITEOK.
1956	 */
1957	spin_lock(&swap_lock);
1958	p->flags = 0;
1959	spin_unlock(&swap_lock);
1960
1961	err = 0;
1962	atomic_inc(&proc_poll_event);
1963	wake_up_interruptible(&proc_poll_wait);
1964
1965out_dput:
1966	filp_close(victim, NULL);
1967out:
1968	putname(pathname);
1969	return err;
1970}
1971
1972#ifdef CONFIG_PROC_FS
1973static unsigned swaps_poll(struct file *file, poll_table *wait)
1974{
1975	struct seq_file *seq = file->private_data;
1976
1977	poll_wait(file, &proc_poll_wait, wait);
1978
1979	if (seq->poll_event != atomic_read(&proc_poll_event)) {
1980		seq->poll_event = atomic_read(&proc_poll_event);
1981		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
1982	}
1983
1984	return POLLIN | POLLRDNORM;
1985}
1986
1987/* iterator */
1988static void *swap_start(struct seq_file *swap, loff_t *pos)
1989{
1990	struct swap_info_struct *si;
1991	int type;
1992	loff_t l = *pos;
1993
1994	mutex_lock(&swapon_mutex);
1995
1996	if (!l)
1997		return SEQ_START_TOKEN;
1998
1999	for (type = 0; type < nr_swapfiles; type++) {
2000		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2001		si = swap_info[type];
2002		if (!(si->flags & SWP_USED) || !si->swap_map)
2003			continue;
2004		if (!--l)
2005			return si;
2006	}
2007
2008	return NULL;
2009}
2010
2011static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2012{
2013	struct swap_info_struct *si = v;
2014	int type;
2015
2016	if (v == SEQ_START_TOKEN)
2017		type = 0;
2018	else
2019		type = si->type + 1;
2020
2021	for (; type < nr_swapfiles; type++) {
2022		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2023		si = swap_info[type];
2024		if (!(si->flags & SWP_USED) || !si->swap_map)
2025			continue;
2026		++*pos;
2027		return si;
2028	}
2029
2030	return NULL;
2031}
2032
2033static void swap_stop(struct seq_file *swap, void *v)
2034{
2035	mutex_unlock(&swapon_mutex);
2036}
2037
2038static int swap_show(struct seq_file *swap, void *v)
2039{
2040	struct swap_info_struct *si = v;
2041	struct file *file;
2042	int len;
2043
2044	if (si == SEQ_START_TOKEN) {
2045		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2046		return 0;
2047	}
2048
2049	file = si->swap_file;
2050	len = seq_path(swap, &file->f_path, " \t\n\\");
2051	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2052			len < 40 ? 40 - len : 1, " ",
2053			S_ISBLK(file_inode(file)->i_mode) ?
2054				"partition" : "file\t",
2055			si->pages << (PAGE_SHIFT - 10),
2056			si->inuse_pages << (PAGE_SHIFT - 10),
2057			si->prio);
2058	return 0;
2059}
2060
2061static const struct seq_operations swaps_op = {
2062	.start =	swap_start,
2063	.next =		swap_next,
2064	.stop =		swap_stop,
2065	.show =		swap_show
2066};
2067
2068static int swaps_open(struct inode *inode, struct file *file)
2069{
2070	struct seq_file *seq;
2071	int ret;
2072
2073	ret = seq_open(file, &swaps_op);
2074	if (ret)
2075		return ret;
2076
2077	seq = file->private_data;
2078	seq->poll_event = atomic_read(&proc_poll_event);
2079	return 0;
2080}
2081
2082static const struct file_operations proc_swaps_operations = {
2083	.open		= swaps_open,
2084	.read		= seq_read,
2085	.llseek		= seq_lseek,
2086	.release	= seq_release,
2087	.poll		= swaps_poll,
2088};
2089
2090static int __init procswaps_init(void)
2091{
2092	proc_create("swaps", 0, NULL, &proc_swaps_operations);
2093	return 0;
2094}
2095__initcall(procswaps_init);
2096#endif /* CONFIG_PROC_FS */
2097
2098#ifdef MAX_SWAPFILES_CHECK
2099static int __init max_swapfiles_check(void)
2100{
2101	MAX_SWAPFILES_CHECK();
2102	return 0;
2103}
2104late_initcall(max_swapfiles_check);
2105#endif
2106
2107static struct swap_info_struct *alloc_swap_info(void)
2108{
2109	struct swap_info_struct *p;
2110	unsigned int type;
2111
2112	p = kzalloc(sizeof(*p), GFP_KERNEL);
2113	if (!p)
2114		return ERR_PTR(-ENOMEM);
2115
2116	spin_lock(&swap_lock);
2117	for (type = 0; type < nr_swapfiles; type++) {
2118		if (!(swap_info[type]->flags & SWP_USED))
2119			break;
2120	}
2121	if (type >= MAX_SWAPFILES) {
2122		spin_unlock(&swap_lock);
2123		kfree(p);
2124		return ERR_PTR(-EPERM);
2125	}
2126	if (type >= nr_swapfiles) {
2127		p->type = type;
2128		swap_info[type] = p;
2129		/*
2130		 * Write swap_info[type] before nr_swapfiles, in case a
2131		 * racing procfs swap_start() or swap_next() is reading them.
2132		 * (We never shrink nr_swapfiles, we never free this entry.)
2133		 */
2134		smp_wmb();
2135		nr_swapfiles++;
2136	} else {
2137		kfree(p);
2138		p = swap_info[type];
2139		/*
2140		 * Do not memset this entry: a racing procfs swap_next()
2141		 * would be relying on p->type to remain valid.
2142		 */
2143	}
2144	INIT_LIST_HEAD(&p->first_swap_extent.list);
2145	p->flags = SWP_USED;
2146	p->next = -1;
2147	spin_unlock(&swap_lock);
2148	spin_lock_init(&p->lock);
2149
2150	return p;
2151}
2152
2153static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2154{
2155	int error;
2156
2157	if (S_ISBLK(inode->i_mode)) {
2158		p->bdev = bdgrab(I_BDEV(inode));
2159		error = blkdev_get(p->bdev,
2160				   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
2161				   sys_swapon);
2162		if (error < 0) {
2163			p->bdev = NULL;
2164			return -EINVAL;
2165		}
2166		p->old_block_size = block_size(p->bdev);
2167		error = set_blocksize(p->bdev, PAGE_SIZE);
2168		if (error < 0)
2169			return error;
2170		p->flags |= SWP_BLKDEV;
2171	} else if (S_ISREG(inode->i_mode)) {
2172		p->bdev = inode->i_sb->s_bdev;
2173		mutex_lock(&inode->i_mutex);
2174		if (IS_SWAPFILE(inode))
2175			return -EBUSY;
2176	} else
2177		return -EINVAL;
2178
2179	return 0;
2180}
2181
2182static unsigned long read_swap_header(struct swap_info_struct *p,
2183					union swap_header *swap_header,
2184					struct inode *inode)
2185{
2186	int i;
2187	unsigned long maxpages;
2188	unsigned long swapfilepages;
2189	unsigned long last_page;
2190
2191	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2192		pr_err("Unable to find swap-space signature\n");
2193		return 0;
2194	}
2195
2196	/* swap partition endianness hack... */
2197	if (swab32(swap_header->info.version) == 1) {
2198		swab32s(&swap_header->info.version);
2199		swab32s(&swap_header->info.last_page);
2200		swab32s(&swap_header->info.nr_badpages);
2201		for (i = 0; i < swap_header->info.nr_badpages; i++)
2202			swab32s(&swap_header->info.badpages[i]);
2203	}
2204	/* Check the swap header's sub-version */
2205	if (swap_header->info.version != 1) {
2206		pr_warn("Unable to handle swap header version %d\n",
2207			swap_header->info.version);
2208		return 0;
2209	}
2210
2211	p->lowest_bit  = 1;
2212	p->cluster_next = 1;
2213	p->cluster_nr = 0;
2214
2215	/*
2216	 * Find out how many pages are allowed for a single swap
2217	 * device. There are two limiting factors: 1) the number
2218	 * of bits for the swap offset in the swp_entry_t type, and
2219	 * 2) the number of bits in the swap pte as defined by the
2220	 * different architectures. In order to find the
2221	 * largest possible bit mask, a swap entry with swap type 0
2222	 * and swap offset ~0UL is created, encoded to a swap pte,
2223	 * decoded to a swp_entry_t again, and finally the swap
2224	 * offset is extracted. This will mask all the bits from
2225	 * the initial ~0UL mask that can't be encoded in either
2226	 * the swp_entry_t or the architecture definition of a
2227	 * swap pte.
2228	 */
2229	maxpages = swp_offset(pte_to_swp_entry(
2230			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2231	last_page = swap_header->info.last_page;
2232	if (last_page > maxpages) {
2233		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2234			maxpages << (PAGE_SHIFT - 10),
2235			last_page << (PAGE_SHIFT - 10));
2236	}
2237	if (maxpages > last_page) {
2238		maxpages = last_page + 1;
2239		/* p->max is an unsigned int: don't overflow it */
2240		if ((unsigned int)maxpages == 0)
2241			maxpages = UINT_MAX;
2242	}
2243	p->highest_bit = maxpages - 1;
2244
2245	if (!maxpages)
2246		return 0;
2247	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2248	if (swapfilepages && maxpages > swapfilepages) {
2249		pr_warn("Swap area shorter than signature indicates\n");
2250		return 0;
2251	}
2252	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2253		return 0;
2254	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2255		return 0;
2256
2257	return maxpages;
2258}
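
/*
 * Worked example (editorial): if the header's last_page is 262143 (a 1GiB
 * area of 4KiB pages, slots 0..262143) and the architecture's swap pte can
 * encode a larger offset, maxpages becomes last_page + 1 == 262144, slot 0
 * being the header page itself.  Were the architecture limit smaller, the
 * area would instead be truncated to that limit with a warning.
 */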
2259
2260static int setup_swap_map_and_extents(struct swap_info_struct *p,
2261					union swap_header *swap_header,
2262					unsigned char *swap_map,
2263					struct swap_cluster_info *cluster_info,
2264					unsigned long maxpages,
2265					sector_t *span)
2266{
2267	int i;
2268	unsigned int nr_good_pages;
2269	int nr_extents;
2270	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2271	unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
2272
2273	nr_good_pages = maxpages - 1;	/* omit header page */
2274
2275	cluster_set_null(&p->free_cluster_head);
2276	cluster_set_null(&p->free_cluster_tail);
2277	cluster_set_null(&p->discard_cluster_head);
2278	cluster_set_null(&p->discard_cluster_tail);
2279
2280	for (i = 0; i < swap_header->info.nr_badpages; i++) {
2281		unsigned int page_nr = swap_header->info.badpages[i];
2282		if (page_nr == 0 || page_nr > swap_header->info.last_page)
2283			return -EINVAL;
2284		if (page_nr < maxpages) {
2285			swap_map[page_nr] = SWAP_MAP_BAD;
2286			nr_good_pages--;
2287			/*
2288			 * Haven't marked the cluster free yet, no list
2289			 * operation involved
2290			 */
2291			inc_cluster_info_page(p, cluster_info, page_nr);
2292		}
2293	}
2294
2295	/* Haven't marked the cluster free yet, no list operation involved */
2296	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2297		inc_cluster_info_page(p, cluster_info, i);
2298
2299	if (nr_good_pages) {
2300		swap_map[0] = SWAP_MAP_BAD;
2301		/*
2302		 * Haven't marked the cluster free yet, so no list
2303		 * operation is involved
2304		 */
2305		inc_cluster_info_page(p, cluster_info, 0);
2306		p->max = maxpages;
2307		p->pages = nr_good_pages;
2308		nr_extents = setup_swap_extents(p, span);
2309		if (nr_extents < 0)
2310			return nr_extents;
2311		nr_good_pages = p->pages;
2312	}
2313	if (!nr_good_pages) {
2314		pr_warn("Empty swap-file\n");
2315		return -EINVAL;
2316	}
2317
2318	if (!cluster_info)
2319		return nr_extents;
2320
2321	for (i = 0; i < nr_clusters; i++) {
2322		if (!cluster_count(&cluster_info[idx])) {
2323			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
2324			if (cluster_is_null(&p->free_cluster_head)) {
2325				cluster_set_next_flag(&p->free_cluster_head,
2326								idx, 0);
2327				cluster_set_next_flag(&p->free_cluster_tail,
2328								idx, 0);
2329			} else {
2330				unsigned int tail;
2331
2332				tail = cluster_next(&p->free_cluster_tail);
2333				cluster_set_next(&cluster_info[tail], idx);
2334				cluster_set_next_flag(&p->free_cluster_tail,
2335								idx, 0);
2336			}
2337		}
2338		idx++;
2339		if (idx == nr_clusters)
2340			idx = 0;
2341	}
2342	return nr_extents;
2343}
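
/*
 * Editorial note: swap_map[] arrives here zero-filled; this helper marks
 * the header page (offset 0) and every page in the header's bad-page table
 * as SWAP_MAP_BAD, counts the remaining good pages, builds the extent list
 * via setup_swap_extents() and, when cluster_info is present (SSD case),
 * strings all still-empty clusters onto the free cluster list used by the
 * per-cpu allocator.
 */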
2344
2345/*
2346 * Helper to sys_swapon determining if a given swap
2347 * backing device queue supports DISCARD operations.
2348 */
2349static bool swap_discardable(struct swap_info_struct *si)
2350{
2351	struct request_queue *q = bdev_get_queue(si->bdev);
2352
2353	if (!q || !blk_queue_discard(q))
2354		return false;
2355
2356	return true;
2357}
2358
2359SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2360{
2361	struct swap_info_struct *p;
2362	struct filename *name;
2363	struct file *swap_file = NULL;
2364	struct address_space *mapping;
2365	int i;
2366	int prio;
2367	int error;
2368	union swap_header *swap_header;
2369	int nr_extents;
2370	sector_t span;
2371	unsigned long maxpages;
2372	unsigned char *swap_map = NULL;
2373	struct swap_cluster_info *cluster_info = NULL;
2374	unsigned long *frontswap_map = NULL;
2375	struct page *page = NULL;
2376	struct inode *inode = NULL;
2377
2378	if (swap_flags & ~SWAP_FLAGS_VALID)
2379		return -EINVAL;
2380
2381	if (!capable(CAP_SYS_ADMIN))
2382		return -EPERM;
2383
2384	p = alloc_swap_info();
2385	if (IS_ERR(p))
2386		return PTR_ERR(p);
2387
2388	INIT_WORK(&p->discard_work, swap_discard_work);
2389
2390	name = getname(specialfile);
2391	if (IS_ERR(name)) {
2392		error = PTR_ERR(name);
2393		name = NULL;
2394		goto bad_swap;
2395	}
2396	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
2397	if (IS_ERR(swap_file)) {
2398		error = PTR_ERR(swap_file);
2399		swap_file = NULL;
2400		goto bad_swap;
2401	}
2402
2403	p->swap_file = swap_file;
2404	mapping = swap_file->f_mapping;
2405
2406	for (i = 0; i < nr_swapfiles; i++) {
2407		struct swap_info_struct *q = swap_info[i];
2408
2409		if (q == p || !q->swap_file)
2410			continue;
2411		if (mapping == q->swap_file->f_mapping) {
2412			error = -EBUSY;
2413			goto bad_swap;
2414		}
2415	}
2416
2417	inode = mapping->host;
2418	/* If S_ISREG(inode->i_mode), claim_swapfile() will do mutex_lock(&inode->i_mutex); */
2419	error = claim_swapfile(p, inode);
2420	if (unlikely(error))
2421		goto bad_swap;
2422
2423	/*
2424	 * Read the swap header.
2425	 */
2426	if (!mapping->a_ops->readpage) {
2427		error = -EINVAL;
2428		goto bad_swap;
2429	}
2430	page = read_mapping_page(mapping, 0, swap_file);
2431	if (IS_ERR(page)) {
2432		error = PTR_ERR(page);
2433		goto bad_swap;
2434	}
2435	swap_header = kmap(page);
2436
2437	maxpages = read_swap_header(p, swap_header, inode);
2438	if (unlikely(!maxpages)) {
2439		error = -EINVAL;
2440		goto bad_swap;
2441	}
2442
2443	/* OK, set up the swap map and apply the bad block list */
2444	swap_map = vzalloc(maxpages);
2445	if (!swap_map) {
2446		error = -ENOMEM;
2447		goto bad_swap;
2448	}
2449	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2450		p->flags |= SWP_SOLIDSTATE;
2451		/*
2452		 * select a random position to start with, to help the SSD's
2453		 * wear levelling
2454		 */
2455		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
2456
2457		cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
2458			SWAPFILE_CLUSTER) * sizeof(*cluster_info));
2459		if (!cluster_info) {
2460			error = -ENOMEM;
2461			goto bad_swap;
2462		}
2463		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
2464		if (!p->percpu_cluster) {
2465			error = -ENOMEM;
2466			goto bad_swap;
2467		}
2468		for_each_possible_cpu(i) {
2469			struct percpu_cluster *cluster;
2470			cluster = per_cpu_ptr(p->percpu_cluster, i);
2471			cluster_set_null(&cluster->index);
2472		}
2473	}
2474
2475	error = swap_cgroup_swapon(p->type, maxpages);
2476	if (error)
2477		goto bad_swap;
2478
2479	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2480		cluster_info, maxpages, &span);
2481	if (unlikely(nr_extents < 0)) {
2482		error = nr_extents;
2483		goto bad_swap;
2484	}
2485	/* frontswap enabled? set up bit-per-page map for frontswap */
2486	if (frontswap_enabled)
2487		frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
2488
2489	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
2490		/*
2491		 * When discard is enabled for swap with no particular
2492		 * policy flagged, we set all swap discard flags here in
2493		 * order to sustain backward compatibility with older
2494		 * swapon(8) releases.
2495		 */
2496		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
2497			     SWP_PAGE_DISCARD);
2498
2499		/*
2500		 * By flagging sys_swapon, a sysadmin can tell us to
2501		 * either do single-time area discards only, or to just
2502		 * perform discards for released swap page-clusters.
2503		 * Now it's time to adjust the p->flags accordingly.
2504		 */
2505		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
2506			p->flags &= ~SWP_PAGE_DISCARD;
2507		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
2508			p->flags &= ~SWP_AREA_DISCARD;
2509
2510		/* issue a swapon-time discard if it's still required */
2511		if (p->flags & SWP_AREA_DISCARD) {
2512			int err = discard_swap(p);
2513			if (unlikely(err))
2514				pr_err("swapon: discard_swap(%p): %d\n",
2515					p, err);
2516		}
2517	}
2518
2519	mutex_lock(&swapon_mutex);
2520	prio = -1;
2521	if (swap_flags & SWAP_FLAG_PREFER)
2522		prio =
2523		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2524	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
2525
2526	pr_info("Adding %uk swap on %s.  "
2527			"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
2528		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
2529		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2530		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2531		(p->flags & SWP_DISCARDABLE) ? "D" : "",
2532		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
2533		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
2534		(frontswap_map) ? "FS" : "");
2535
2536	mutex_unlock(&swapon_mutex);
2537	atomic_inc(&proc_poll_event);
2538	wake_up_interruptible(&proc_poll_wait);
2539
2540	if (S_ISREG(inode->i_mode))
2541		inode->i_flags |= S_SWAPFILE;
2542	error = 0;
2543	goto out;
2544bad_swap:
2545	free_percpu(p->percpu_cluster);
2546	p->percpu_cluster = NULL;
2547	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2548		set_blocksize(p->bdev, p->old_block_size);
2549		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2550	}
2551	destroy_swap_extents(p);
2552	swap_cgroup_swapoff(p->type);
2553	spin_lock(&swap_lock);
2554	p->swap_file = NULL;
2555	p->flags = 0;
2556	spin_unlock(&swap_lock);
2557	vfree(swap_map);
2558	vfree(cluster_info);
2559	if (swap_file) {
2560		if (inode && S_ISREG(inode->i_mode)) {
2561			mutex_unlock(&inode->i_mutex);
2562			inode = NULL;
2563		}
2564		filp_close(swap_file, NULL);
2565	}
2566out:
2567	if (page && !IS_ERR(page)) {
2568		kunmap(page);
2569		page_cache_release(page);
2570	}
2571	if (name)
2572		putname(name);
2573	if (inode && S_ISREG(inode->i_mode))
2574		mutex_unlock(&inode->i_mutex);
2575	return error;
2576}
2577
2578void si_swapinfo(struct sysinfo *val)
2579{
2580	unsigned int type;
2581	unsigned long nr_to_be_unused = 0;
2582
2583	spin_lock(&swap_lock);
2584	for (type = 0; type < nr_swapfiles; type++) {
2585		struct swap_info_struct *si = swap_info[type];
2586
2587		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2588			nr_to_be_unused += si->inuse_pages;
2589	}
2590	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
2591	val->totalswap = total_swap_pages + nr_to_be_unused;
2592	spin_unlock(&swap_lock);
2593}
2594
2595/*
2596 * Verify that a swap entry is valid and increment its swap map count.
2597 *
2598 * Returns one of the following:
2599 * - success -> 0
2600 * - swp_entry is invalid -> EINVAL
2601 * - swp_entry is migration entry -> EINVAL
2602 * - swap-cache reference is requested but there is already one. -> EEXIST
2603 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2604 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2605 */
2606static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2607{
2608	struct swap_info_struct *p;
2609	unsigned long offset, type;
2610	unsigned char count;
2611	unsigned char has_cache;
2612	int err = -EINVAL;
2613
2614	if (non_swap_entry(entry))
2615		goto out;
2616
2617	type = swp_type(entry);
2618	if (type >= nr_swapfiles)
2619		goto bad_file;
2620	p = swap_info[type];
2621	offset = swp_offset(entry);
2622
2623	spin_lock(&p->lock);
2624	if (unlikely(offset >= p->max))
2625		goto unlock_out;
2626
2627	count = p->swap_map[offset];
2628
2629	/*
2630	 * swapin_readahead() doesn't check if a swap entry is valid, so the
2631	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
2632	 */
2633	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
2634		err = -ENOENT;
2635		goto unlock_out;
2636	}
2637
2638	has_cache = count & SWAP_HAS_CACHE;
2639	count &= ~SWAP_HAS_CACHE;
2640	err = 0;
2641
2642	if (usage == SWAP_HAS_CACHE) {
2643
2644		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
2645		if (!has_cache && count)
2646			has_cache = SWAP_HAS_CACHE;
2647		else if (has_cache)		/* someone else added cache */
2648			err = -EEXIST;
2649		else				/* no users remaining */
2650			err = -ENOENT;
2651
2652	} else if (count || has_cache) {
2653
2654		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2655			count += usage;
2656		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2657			err = -EINVAL;
2658		else if (swap_count_continued(p, offset, count))
2659			count = COUNT_CONTINUED;
2660		else
2661			err = -ENOMEM;
2662	} else
2663		err = -ENOENT;			/* unused swap entry */
2664
2665	p->swap_map[offset] = count | has_cache;
2666
2667unlock_out:
2668	spin_unlock(&p->lock);
2669out:
2670	return err;
2671
2672bad_file:
2673	pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
2674	goto out;
2675}
2676
2677/*
2678 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2679 * (in which case its reference count is never incremented).
2680 */
2681void swap_shmem_alloc(swp_entry_t entry)
2682{
2683	__swap_duplicate(entry, SWAP_MAP_SHMEM);
2684}
2685
2686/*
2687 * Increase reference count of swap entry by 1.
2688 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2689 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
2690 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2691 * might occur if a page table entry has got corrupted.
2692 */
2693int swap_duplicate(swp_entry_t entry)
2694{
2695	int err = 0;
2696
2697	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2698		err = add_swap_count_continuation(entry, GFP_ATOMIC);
2699	return err;
2700}
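
/*
 * Usage note (editorial): callers holding page table locks rely on the
 * GFP_ATOMIC attempt made above; on -ENOMEM they are expected to drop
 * their locks and retry via add_swap_count_continuation(entry, GFP_KERNEL),
 * as the comment above that function explains further below.
 */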
2701
2702/*
2703 * @entry: swap entry for which we allocate swap cache.
2704 *
2705 * Called when allocating swap cache for an existing swap entry.
2706 * This can return error codes; it returns 0 on success.
2707 * -EBUSY means there is already a swap cache.
2708 * Note: return code is different from swap_duplicate().
2709 */
2710int swapcache_prepare(swp_entry_t entry)
2711{
2712	return __swap_duplicate(entry, SWAP_HAS_CACHE);
2713}
2714
2715struct swap_info_struct *page_swap_info(struct page *page)
2716{
2717	swp_entry_t swap = { .val = page_private(page) };
2718	BUG_ON(!PageSwapCache(page));
2719	return swap_info[swp_type(swap)];
2720}
2721
2722/*
2723 * out-of-line __page_file_ methods to avoid include hell.
2724 */
2725struct address_space *__page_file_mapping(struct page *page)
2726{
2727	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2728	return page_swap_info(page)->swap_file->f_mapping;
2729}
2730EXPORT_SYMBOL_GPL(__page_file_mapping);
2731
2732pgoff_t __page_file_index(struct page *page)
2733{
2734	swp_entry_t swap = { .val = page_private(page) };
2735	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2736	return swp_offset(swap);
2737}
2738EXPORT_SYMBOL_GPL(__page_file_index);
2739
2740/*
2741 * add_swap_count_continuation - called when a swap count is duplicated
2742 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2743 * page of the original vmalloc'ed swap_map, to hold the continuation count
2744 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2745 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2746 *
2747 * These continuation pages are seldom referenced: the common paths all work
2748 * on the original swap_map, only referring to a continuation page when the
2749 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2750 *
2751 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2752 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2753 * can be called after dropping locks.
2754 */
2755int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2756{
2757	struct swap_info_struct *si;
2758	struct page *head;
2759	struct page *page;
2760	struct page *list_page;
2761	pgoff_t offset;
2762	unsigned char count;
2763
2764	/*
2765	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2766	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2767	 */
2768	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2769
2770	si = swap_info_get(entry);
2771	if (!si) {
2772		/*
2773		 * An acceptable race has occurred since the failing
2774		 * __swap_duplicate(): the swap entry has been freed,
2775		 * perhaps even the whole swap_map cleared for swapoff.
2776		 */
2777		goto outer;
2778	}
2779
2780	offset = swp_offset(entry);
2781	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2782
2783	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2784		/*
2785		 * The higher the swap count, the more likely it is that tasks
2786		 * will race to add swap count continuation: we need to avoid
2787		 * over-provisioning.
2788		 */
2789		goto out;
2790	}
2791
2792	if (!page) {
2793		spin_unlock(&si->lock);
2794		return -ENOMEM;
2795	}
2796
2797	/*
2798	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2799	 * no architecture is using highmem pages for kernel page tables: so it
2800	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
2801	 */
2802	head = vmalloc_to_page(si->swap_map + offset);
2803	offset &= ~PAGE_MASK;
2804
2805	/*
2806	 * Page allocation does not initialize the page's lru field,
2807	 * but it does always reset its private field.
2808	 */
2809	if (!page_private(head)) {
2810		BUG_ON(count & COUNT_CONTINUED);
2811		INIT_LIST_HEAD(&head->lru);
2812		set_page_private(head, SWP_CONTINUED);
2813		si->flags |= SWP_CONTINUED;
2814	}
2815
2816	list_for_each_entry(list_page, &head->lru, lru) {
2817		unsigned char *map;
2818
2819		/*
2820		 * If the previous map said no continuation, but we've found
2821		 * a continuation page, free our allocation and use this one.
2822		 */
2823		if (!(count & COUNT_CONTINUED))
2824			goto out;
2825
2826		map = kmap_atomic(list_page) + offset;
2827		count = *map;
2828		kunmap_atomic(map);
2829
2830		/*
2831		 * If this continuation count now has some space in it,
2832		 * free our allocation and use this one.
2833		 */
2834		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2835			goto out;
2836	}
2837
2838	list_add_tail(&page->lru, &head->lru);
2839	page = NULL;			/* now it's attached, don't free it */
2840out:
2841	spin_unlock(&si->lock);
2842outer:
2843	if (page)
2844		__free_page(page);
2845	return 0;
2846}
2847
2848/*
2849 * swap_count_continued - when the original swap_map count is incremented
2850 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2851 * into, carry if so, or else fail until a new continuation page is allocated;
2852 * when the original swap_map count is decremented from 0 with continuation,
2853 * borrow from the continuation and report whether it still holds more.
2854 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2855 */
2856static bool swap_count_continued(struct swap_info_struct *si,
2857				 pgoff_t offset, unsigned char count)
2858{
2859	struct page *head;
2860	struct page *page;
2861	unsigned char *map;
2862
2863	head = vmalloc_to_page(si->swap_map + offset);
2864	if (page_private(head) != SWP_CONTINUED) {
2865		BUG_ON(count & COUNT_CONTINUED);
2866		return false;		/* need to add count continuation */
2867	}
2868
2869	offset &= ~PAGE_MASK;
2870	page = list_entry(head->lru.next, struct page, lru);
2871	map = kmap_atomic(page) + offset;
2872
2873	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
2874		goto init_map;		/* jump over SWAP_CONT_MAX checks */
2875
2876	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2877		/*
2878		 * Think of how you add 1 to 999
2879		 */
2880		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2881			kunmap_atomic(map);
2882			page = list_entry(page->lru.next, struct page, lru);
2883			BUG_ON(page == head);
2884			map = kmap_atomic(page) + offset;
2885		}
2886		if (*map == SWAP_CONT_MAX) {
2887			kunmap_atomic(map);
2888			page = list_entry(page->lru.next, struct page, lru);
2889			if (page == head)
2890				return false;	/* add count continuation */
2891			map = kmap_atomic(page) + offset;
2892init_map:		*map = 0;		/* we didn't zero the page */
2893		}
2894		*map += 1;
2895		kunmap_atomic(map);
2896		page = list_entry(page->lru.prev, struct page, lru);
2897		while (page != head) {
2898			map = kmap_atomic(page) + offset;
2899			*map = COUNT_CONTINUED;
2900			kunmap_atomic(map);
2901			page = list_entry(page->lru.prev, struct page, lru);
2902		}
2903		return true;			/* incremented */
2904
2905	} else {				/* decrementing */
2906		/*
2907		 * Think of how you subtract 1 from 1000
2908		 */
2909		BUG_ON(count != COUNT_CONTINUED);
2910		while (*map == COUNT_CONTINUED) {
2911			kunmap_atomic(map);
2912			page = list_entry(page->lru.next, struct page, lru);
2913			BUG_ON(page == head);
2914			map = kmap_atomic(page) + offset;
2915		}
2916		BUG_ON(*map == 0);
2917		*map -= 1;
2918		if (*map == 0)
2919			count = 0;
2920		kunmap_atomic(map);
2921		page = list_entry(page->lru.prev, struct page, lru);
2922		while (page != head) {
2923			map = kmap_atomic(page) + offset;
2924			*map = SWAP_CONT_MAX | count;
2925			count = COUNT_CONTINUED;
2926			kunmap_atomic(map);
2927			page = list_entry(page->lru.prev, struct page, lru);
2928		}
2929		return count == COUNT_CONTINUED;
2930	}
2931}
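
/*
 * Editorial example of the "digit" arithmetic above: the low digit of a
 * swap count lives in swap_map[offset] and saturates at SWAP_MAP_MAX; each
 * continuation page linked off the swap_map page holds one higher digit
 * for the same offset, saturating at SWAP_CONT_MAX, with COUNT_CONTINUED
 * flagging that another digit follows.  Incrementing past a saturated
 * digit carries into the next page (like adding 1 to 999) and decrementing
 * a zero low digit borrows from it (like subtracting 1 from 1000).
 */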
2932
2933/*
2934 * free_swap_count_continuations - swapoff free all the continuation pages
2935 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2936 */
2937static void free_swap_count_continuations(struct swap_info_struct *si)
2938{
2939	pgoff_t offset;
2940
2941	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2942		struct page *head;
2943		head = vmalloc_to_page(si->swap_map + offset);
2944		if (page_private(head)) {
2945			struct list_head *this, *next;
2946			list_for_each_safe(this, next, &head->lru) {
2947				struct page *page;
2948				page = list_entry(this, struct page, lru);
2949				list_del(this);
2950				__free_page(page);
2951			}
2952		}
2953	}
2954}