mm/swapfile.c (Linux v4.17)
   1/*
   2 *  linux/mm/swapfile.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *  Swap reorganised 29.12.95, Stephen Tweedie
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/sched/mm.h>
  10#include <linux/sched/task.h>
  11#include <linux/hugetlb.h>
  12#include <linux/mman.h>
  13#include <linux/slab.h>
  14#include <linux/kernel_stat.h>
  15#include <linux/swap.h>
  16#include <linux/vmalloc.h>
  17#include <linux/pagemap.h>
  18#include <linux/namei.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/blkdev.h>
  21#include <linux/random.h>
  22#include <linux/writeback.h>
  23#include <linux/proc_fs.h>
  24#include <linux/seq_file.h>
  25#include <linux/init.h>
  26#include <linux/ksm.h>
  27#include <linux/rmap.h>
  28#include <linux/security.h>
  29#include <linux/backing-dev.h>
  30#include <linux/mutex.h>
  31#include <linux/capability.h>
  32#include <linux/syscalls.h>
  33#include <linux/memcontrol.h>
  34#include <linux/poll.h>
  35#include <linux/oom.h>
  36#include <linux/frontswap.h>
  37#include <linux/swapfile.h>
  38#include <linux/export.h>
  39#include <linux/swap_slots.h>
  40#include <linux/sort.h>
  41
  42#include <asm/pgtable.h>
  43#include <asm/tlbflush.h>
  44#include <linux/swapops.h>
  45#include <linux/swap_cgroup.h>
  46
  47static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  48				 unsigned char);
  49static void free_swap_count_continuations(struct swap_info_struct *);
  50static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  51
  52DEFINE_SPINLOCK(swap_lock);
  53static unsigned int nr_swapfiles;
  54atomic_long_t nr_swap_pages;
  55/*
  56 * Some modules use swappable objects and may try to swap them out under
  57 * memory pressure (via the shrinker). Before doing so, they may wish to
  58 * check to see if any swap space is available.
  59 */
  60EXPORT_SYMBOL_GPL(nr_swap_pages);
  61/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  62long total_swap_pages;
  63static int least_priority = -1;
  64
  65static const char Bad_file[] = "Bad swap file entry ";
  66static const char Unused_file[] = "Unused swap file entry ";
  67static const char Bad_offset[] = "Bad swap offset entry ";
  68static const char Unused_offset[] = "Unused swap offset entry ";
  69
  70/*
  71 * all active swap_info_structs
  72 * protected with swap_lock, and ordered by priority.
  73 */
  74PLIST_HEAD(swap_active_head);
  75
  76/*
  77 * all available (active, not full) swap_info_structs
  78 * protected with swap_avail_lock, ordered by priority.
  79 * This is used by get_swap_page() instead of swap_active_head
  80 * because swap_active_head includes all swap_info_structs,
  81 * but get_swap_page() doesn't need to look at full ones.
  82 * This uses its own lock instead of swap_lock because when a
  83 * swap_info_struct changes between not-full/full, it needs to
  84 * add/remove itself to/from this list, but the swap_info_struct->lock
  85 * is held and the locking order requires swap_lock to be taken
  86 * before any swap_info_struct->lock.
  87 */
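/* One plist head per NUMA node, indexed by node id (see add_to_avail_list()). */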
  88static struct plist_head *swap_avail_heads;
  89static DEFINE_SPINLOCK(swap_avail_lock);
  90
  91struct swap_info_struct *swap_info[MAX_SWAPFILES];
  92
  93static DEFINE_MUTEX(swapon_mutex);
  94
  95static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  96/* Activity counter to indicate that a swapon or swapoff has occurred */
  97static atomic_t proc_poll_event = ATOMIC_INIT(0);
  98
  99atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 100
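/*
 * Each swap_map byte encodes both a reference count and cache state: the low
 * bits hold the usage count (with the special values SWAP_MAP_BAD and
 * SWAP_MAP_SHMEM aside), COUNT_CONTINUED signals that the count continues in
 * an extra continuation page, and SWAP_HAS_CACHE records that the slot is
 * held by the swap cache.
 */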
 101static inline unsigned char swap_count(unsigned char ent)
 102{
 103	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
 104}
 105
 106/* returns 1 if swap entry is freed */
 107static int
 108__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
 109{
 110	swp_entry_t entry = swp_entry(si->type, offset);
 111	struct page *page;
 112	int ret = 0;
 113
 114	page = find_get_page(swap_address_space(entry), swp_offset(entry));
 115	if (!page)
 116		return 0;
 117	/*
  118	 * This function is called from scan_swap_map(), which is reached
  119	 * from vmscan.c while reclaiming pages, so a page lock may already
  120	 * be held here. We have to use trylock to avoid deadlock. This is a
  121	 * special case; in usual operations use try_to_free_swap() with an
  122	 * explicit lock_page().
 123	 */
 124	if (trylock_page(page)) {
 125		ret = try_to_free_swap(page);
 126		unlock_page(page);
 127	}
 128	put_page(page);
 129	return ret;
 130}
 131
 132/*
  133 * swapon tells the device that all the old swap contents can be discarded,
 134 * to allow the swap device to optimize its wear-levelling.
 135 */
 136static int discard_swap(struct swap_info_struct *si)
 137{
 138	struct swap_extent *se;
 139	sector_t start_block;
 140	sector_t nr_blocks;
 141	int err = 0;
 142
 143	/* Do not discard the swap header page! */
 144	se = &si->first_swap_extent;
 145	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 146	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 147	if (nr_blocks) {
 148		err = blkdev_issue_discard(si->bdev, start_block,
 149				nr_blocks, GFP_KERNEL, 0);
 150		if (err)
 151			return err;
 152		cond_resched();
 153	}
 154
 155	list_for_each_entry(se, &si->first_swap_extent.list, list) {
 156		start_block = se->start_block << (PAGE_SHIFT - 9);
 157		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 158
 159		err = blkdev_issue_discard(si->bdev, start_block,
 160				nr_blocks, GFP_KERNEL, 0);
 161		if (err)
 162			break;
 163
 164		cond_resched();
 165	}
 166	return err;		/* That will often be -EOPNOTSUPP */
 167}
 168
 169/*
  170 * swap allocation tells the device that a cluster of swap can now be discarded,
 171 * to allow the swap device to optimize its wear-levelling.
 172 */
 173static void discard_swap_cluster(struct swap_info_struct *si,
 174				 pgoff_t start_page, pgoff_t nr_pages)
 175{
 176	struct swap_extent *se = si->curr_swap_extent;
 177	int found_extent = 0;
 178
 179	while (nr_pages) {
 180		if (se->start_page <= start_page &&
 181		    start_page < se->start_page + se->nr_pages) {
 182			pgoff_t offset = start_page - se->start_page;
 183			sector_t start_block = se->start_block + offset;
 184			sector_t nr_blocks = se->nr_pages - offset;
 185
 186			if (nr_blocks > nr_pages)
 187				nr_blocks = nr_pages;
 188			start_page += nr_blocks;
 189			nr_pages -= nr_blocks;
 190
 191			if (!found_extent++)
 192				si->curr_swap_extent = se;
 193
 194			start_block <<= PAGE_SHIFT - 9;
 195			nr_blocks <<= PAGE_SHIFT - 9;
 196			if (blkdev_issue_discard(si->bdev, start_block,
 197				    nr_blocks, GFP_NOIO, 0))
 198				break;
 199		}
 200
 201		se = list_next_entry(se, list);
 202	}
 203}
 204
 205#ifdef CONFIG_THP_SWAP
 206#define SWAPFILE_CLUSTER	HPAGE_PMD_NR
 207#else
 208#define SWAPFILE_CLUSTER	256
 209#endif
 210#define LATENCY_LIMIT		256
 211
 212static inline void cluster_set_flag(struct swap_cluster_info *info,
 213	unsigned int flag)
 214{
 215	info->flags = flag;
 216}
 217
 218static inline unsigned int cluster_count(struct swap_cluster_info *info)
 219{
 220	return info->data;
 221}
 222
 223static inline void cluster_set_count(struct swap_cluster_info *info,
 224				     unsigned int c)
 225{
 226	info->data = c;
 227}
 228
 229static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 230					 unsigned int c, unsigned int f)
 231{
 232	info->flags = f;
 233	info->data = c;
 234}
 235
 236static inline unsigned int cluster_next(struct swap_cluster_info *info)
 237{
 238	return info->data;
 239}
 240
 241static inline void cluster_set_next(struct swap_cluster_info *info,
 242				    unsigned int n)
 243{
 244	info->data = n;
 245}
 246
 247static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 248					 unsigned int n, unsigned int f)
 249{
 250	info->flags = f;
 251	info->data = n;
 252}
 253
 254static inline bool cluster_is_free(struct swap_cluster_info *info)
 255{
 256	return info->flags & CLUSTER_FLAG_FREE;
 257}
 258
 259static inline bool cluster_is_null(struct swap_cluster_info *info)
 260{
 261	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 262}
 263
 264static inline void cluster_set_null(struct swap_cluster_info *info)
 265{
 266	info->flags = CLUSTER_FLAG_NEXT_NULL;
 267	info->data = 0;
 268}
 269
 270static inline bool cluster_is_huge(struct swap_cluster_info *info)
 271{
 272	return info->flags & CLUSTER_FLAG_HUGE;
 273}
 274
 275static inline void cluster_clear_huge(struct swap_cluster_info *info)
 276{
 277	info->flags &= ~CLUSTER_FLAG_HUGE;
 278}
 279
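/*
 * Lock the cluster that contains @offset when the device has cluster_info
 * (the SSD allocator). Returns the locked swap_cluster_info, or NULL when
 * there is no cluster_info, in which case callers that need exclusion fall
 * back to si->lock.
 */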
 280static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 281						     unsigned long offset)
 282{
 283	struct swap_cluster_info *ci;
 284
 285	ci = si->cluster_info;
 286	if (ci) {
 287		ci += offset / SWAPFILE_CLUSTER;
 288		spin_lock(&ci->lock);
 289	}
 290	return ci;
 291}
 292
 293static inline void unlock_cluster(struct swap_cluster_info *ci)
 294{
 295	if (ci)
 296		spin_unlock(&ci->lock);
 297}
 298
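/*
 * Take the finest-grained lock available for @offset: the per-cluster lock
 * when cluster_info exists, otherwise si->lock. Must be paired with
 * unlock_cluster_or_swap_info(), as in the pattern used elsewhere in this
 * file:
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	count = swap_count(si->swap_map[offset]);
 *	unlock_cluster_or_swap_info(si, ci);
 */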
 299static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 300	struct swap_info_struct *si,
 301	unsigned long offset)
 302{
 303	struct swap_cluster_info *ci;
 304
 305	ci = lock_cluster(si, offset);
 306	if (!ci)
 307		spin_lock(&si->lock);
 308
 309	return ci;
 310}
 311
 312static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 313					       struct swap_cluster_info *ci)
 314{
 315	if (ci)
 316		unlock_cluster(ci);
 317	else
 318		spin_unlock(&si->lock);
 319}
 320
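/*
 * The free and discard cluster lists are index-linked lists threaded through
 * the swap_cluster_info array: the head and tail of a swap_cluster_list hold
 * cluster indexes, and while a cluster sits on a list its data field holds
 * the index of the next cluster (the same field holds the usage count once
 * the cluster is allocated).
 */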
 321static inline bool cluster_list_empty(struct swap_cluster_list *list)
 322{
 323	return cluster_is_null(&list->head);
 324}
 325
 326static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 327{
 328	return cluster_next(&list->head);
 329}
 330
 331static void cluster_list_init(struct swap_cluster_list *list)
 332{
 333	cluster_set_null(&list->head);
 334	cluster_set_null(&list->tail);
 335}
 336
 337static void cluster_list_add_tail(struct swap_cluster_list *list,
 338				  struct swap_cluster_info *ci,
 339				  unsigned int idx)
 340{
 341	if (cluster_list_empty(list)) {
 342		cluster_set_next_flag(&list->head, idx, 0);
 343		cluster_set_next_flag(&list->tail, idx, 0);
 344	} else {
 345		struct swap_cluster_info *ci_tail;
 346		unsigned int tail = cluster_next(&list->tail);
 347
 348		/*
 349		 * Nested cluster lock, but both cluster locks are
  350		 * only acquired while holding swap_info_struct->lock
 351		 */
 352		ci_tail = ci + tail;
 353		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 354		cluster_set_next(ci_tail, idx);
 355		spin_unlock(&ci_tail->lock);
 356		cluster_set_next_flag(&list->tail, idx, 0);
 357	}
 358}
 359
 360static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 361					   struct swap_cluster_info *ci)
 362{
 363	unsigned int idx;
 364
 365	idx = cluster_next(&list->head);
 366	if (cluster_next(&list->tail) == idx) {
 367		cluster_set_null(&list->head);
 368		cluster_set_null(&list->tail);
 369	} else
 370		cluster_set_next_flag(&list->head,
 371				      cluster_next(&ci[idx]), 0);
 372
 373	return idx;
 374}
 375
 376/* Add a cluster to discard list and schedule it to do discard */
 377static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 378		unsigned int idx)
 379{
 380	/*
 381	 * If scan_swap_map() can't find a free cluster, it will check
 382	 * si->swap_map directly. To make sure the discarding cluster isn't
  383	 * taken by scan_swap_map(), mark the swap entries bad (occupied). They
  384	 * will be cleared again after the discard completes.
 385	 */
 386	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 387			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 388
 389	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 390
 391	schedule_work(&si->discard_work);
 392}
 393
 394static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 395{
 396	struct swap_cluster_info *ci = si->cluster_info;
 397
 398	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 399	cluster_list_add_tail(&si->free_clusters, ci, idx);
 400}
 401
 402/*
  403 * Do the scheduled discards. After a cluster discard is finished, the
  404 * cluster is added to the free cluster list. The caller should hold si->lock.
  405 */
 406static void swap_do_scheduled_discard(struct swap_info_struct *si)
 407{
 408	struct swap_cluster_info *info, *ci;
 409	unsigned int idx;
 410
 411	info = si->cluster_info;
 412
 413	while (!cluster_list_empty(&si->discard_clusters)) {
 414		idx = cluster_list_del_first(&si->discard_clusters, info);
 415		spin_unlock(&si->lock);
 416
 417		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 418				SWAPFILE_CLUSTER);
 419
 420		spin_lock(&si->lock);
 421		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 422		__free_cluster(si, idx);
 423		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 424				0, SWAPFILE_CLUSTER);
 425		unlock_cluster(ci);
 426	}
 427}
 428
 429static void swap_discard_work(struct work_struct *work)
 430{
 431	struct swap_info_struct *si;
 432
 433	si = container_of(work, struct swap_info_struct, discard_work);
 434
 435	spin_lock(&si->lock);
 436	swap_do_scheduled_discard(si);
 437	spin_unlock(&si->lock);
 438}
 439
 440static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 441{
 442	struct swap_cluster_info *ci = si->cluster_info;
 443
 444	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 445	cluster_list_del_first(&si->free_clusters, ci);
 446	cluster_set_count_flag(ci + idx, 0, 0);
 447}
 448
 449static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 450{
 451	struct swap_cluster_info *ci = si->cluster_info + idx;
 452
 453	VM_BUG_ON(cluster_count(ci) != 0);
 454	/*
  455	 * If the swap is discardable, schedule a discard of the cluster
  456	 * instead of freeing it immediately. The cluster will be freed
  457	 * after the discard.
 458	 */
 459	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 460	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 461		swap_cluster_schedule_discard(si, idx);
 462		return;
 463	}
 464
 465	__free_cluster(si, idx);
 466}
 467
 468/*
 469 * The cluster corresponding to page_nr will be used. The cluster will be
 470 * removed from free cluster list and its usage counter will be increased.
 471 */
 472static void inc_cluster_info_page(struct swap_info_struct *p,
 473	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 474{
 475	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 476
 477	if (!cluster_info)
 478		return;
 479	if (cluster_is_free(&cluster_info[idx]))
 480		alloc_cluster(p, idx);
 481
 482	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 483	cluster_set_count(&cluster_info[idx],
 484		cluster_count(&cluster_info[idx]) + 1);
 485}
 486
 487/*
  488 * The usage count of the cluster corresponding to page_nr is decremented.
  489 * If the counter becomes 0, meaning no page in the cluster is in use, we can
  490 * optionally discard the cluster and add it to the free cluster list.
 491 */
 492static void dec_cluster_info_page(struct swap_info_struct *p,
 493	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 494{
 495	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 496
 497	if (!cluster_info)
 498		return;
 499
 500	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 501	cluster_set_count(&cluster_info[idx],
 502		cluster_count(&cluster_info[idx]) - 1);
 503
 504	if (cluster_count(&cluster_info[idx]) == 0)
 505		free_cluster(p, idx);
 506}
 507
 508/*
  509 * It's possible for scan_swap_map() to use a free cluster in the middle of
  510 * the free cluster list. Avoid such abuse to prevent list corruption.
 511 */
 512static bool
 513scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 514	unsigned long offset)
 515{
 516	struct percpu_cluster *percpu_cluster;
 517	bool conflict;
 518
 519	offset /= SWAPFILE_CLUSTER;
 520	conflict = !cluster_list_empty(&si->free_clusters) &&
 521		offset != cluster_list_first(&si->free_clusters) &&
 522		cluster_is_free(&si->cluster_info[offset]);
 523
 524	if (!conflict)
 525		return false;
 526
 527	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 528	cluster_set_null(&percpu_cluster->index);
 529	return true;
 530}
 531
 532/*
 533 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 534 * might involve allocating a new cluster for current CPU too.
 535 */
 536static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 537	unsigned long *offset, unsigned long *scan_base)
 538{
 539	struct percpu_cluster *cluster;
 540	struct swap_cluster_info *ci;
 541	bool found_free;
 542	unsigned long tmp, max;
 543
 544new_cluster:
 545	cluster = this_cpu_ptr(si->percpu_cluster);
 546	if (cluster_is_null(&cluster->index)) {
 547		if (!cluster_list_empty(&si->free_clusters)) {
 548			cluster->index = si->free_clusters.head;
 549			cluster->next = cluster_next(&cluster->index) *
 550					SWAPFILE_CLUSTER;
 551		} else if (!cluster_list_empty(&si->discard_clusters)) {
 552			/*
  553			 * we don't have a free cluster, but there are clusters
  554			 * pending discard; do the discard now and reclaim them
 555			 */
 556			swap_do_scheduled_discard(si);
 557			*scan_base = *offset = si->cluster_next;
 558			goto new_cluster;
 559		} else
 560			return false;
 561	}
 562
 563	found_free = false;
 564
 565	/*
  566	 * Other CPUs can use our cluster if they can't find a free cluster;
  567	 * check whether there is still a free entry in the cluster
 568	 */
 569	tmp = cluster->next;
 570	max = min_t(unsigned long, si->max,
 571		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 572	if (tmp >= max) {
 573		cluster_set_null(&cluster->index);
 574		goto new_cluster;
 575	}
 576	ci = lock_cluster(si, tmp);
 577	while (tmp < max) {
 578		if (!si->swap_map[tmp]) {
 579			found_free = true;
 580			break;
 581		}
 582		tmp++;
 583	}
 584	unlock_cluster(ci);
 585	if (!found_free) {
 586		cluster_set_null(&cluster->index);
 587		goto new_cluster;
 588	}
 589	cluster->next = tmp + 1;
 590	*offset = tmp;
 591	*scan_base = tmp;
 592	return found_free;
 593}
 594
 595static void __del_from_avail_list(struct swap_info_struct *p)
 596{
 597	int nid;
 598
 599	for_each_node(nid)
 600		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 601}
 602
 603static void del_from_avail_list(struct swap_info_struct *p)
 604{
 605	spin_lock(&swap_avail_lock);
 606	__del_from_avail_list(p);
 607	spin_unlock(&swap_avail_lock);
 608}
 609
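/*
 * Account for nr_entries slots allocated at @offset: narrow the
 * lowest_bit/highest_bit scan window and, once the device becomes completely
 * full, drop it from the per-node avail lists. Called with si->lock held.
 */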
 610static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 611			     unsigned int nr_entries)
 612{
 613	unsigned int end = offset + nr_entries - 1;
 614
 615	if (offset == si->lowest_bit)
 616		si->lowest_bit += nr_entries;
 617	if (end == si->highest_bit)
 618		si->highest_bit -= nr_entries;
 619	si->inuse_pages += nr_entries;
 620	if (si->inuse_pages == si->pages) {
 621		si->lowest_bit = si->max;
 622		si->highest_bit = 0;
 623		del_from_avail_list(si);
 624	}
 625}
 626
 627static void add_to_avail_list(struct swap_info_struct *p)
 628{
 629	int nid;
 630
 631	spin_lock(&swap_avail_lock);
 632	for_each_node(nid) {
 633		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
 634		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 635	}
 636	spin_unlock(&swap_avail_lock);
 637}
 638
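/*
 * Return nr_entries slots starting at @offset to the device: widen the scan
 * window, put a previously full device back on the avail lists, update
 * nr_swap_pages and inuse_pages, and notify frontswap and the block driver
 * for each freed slot.
 */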
 639static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 640			    unsigned int nr_entries)
 641{
 642	unsigned long end = offset + nr_entries - 1;
 643	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 644
 645	if (offset < si->lowest_bit)
 646		si->lowest_bit = offset;
 647	if (end > si->highest_bit) {
 648		bool was_full = !si->highest_bit;
 649
 650		si->highest_bit = end;
 651		if (was_full && (si->flags & SWP_WRITEOK))
 652			add_to_avail_list(si);
 653	}
 654	atomic_long_add(nr_entries, &nr_swap_pages);
 655	si->inuse_pages -= nr_entries;
 656	if (si->flags & SWP_BLKDEV)
 657		swap_slot_free_notify =
 658			si->bdev->bd_disk->fops->swap_slot_free_notify;
 659	else
 660		swap_slot_free_notify = NULL;
 661	while (offset <= end) {
 662		frontswap_invalidate_page(si->type, offset);
 663		if (swap_slot_free_notify)
 664			swap_slot_free_notify(si->bdev, offset);
 665		offset++;
 666	}
 667}
 668
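/*
 * Allocate up to @nr free swap slots (at most SWAP_BATCH) from @si, marking
 * each with @usage and storing the resulting entries in @slots[]. Returns
 * the number of slots obtained. Called with si->lock held; the lock may be
 * dropped and retaken while scanning.
 */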
 669static int scan_swap_map_slots(struct swap_info_struct *si,
 670			       unsigned char usage, int nr,
 671			       swp_entry_t slots[])
 672{
 673	struct swap_cluster_info *ci;
 674	unsigned long offset;
 675	unsigned long scan_base;
 676	unsigned long last_in_cluster = 0;
 677	int latency_ration = LATENCY_LIMIT;
 678	int n_ret = 0;
 679
 680	if (nr > SWAP_BATCH)
 681		nr = SWAP_BATCH;
 682
 683	/*
 684	 * We try to cluster swap pages by allocating them sequentially
 685	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 686	 * way, however, we resort to first-free allocation, starting
 687	 * a new cluster.  This prevents us from scattering swap pages
 688	 * all over the entire swap partition, so that we reduce
 689	 * overall disk seek times between swap pages.  -- sct
 690	 * But we do now try to find an empty cluster.  -Andrea
 691	 * And we let swap pages go all over an SSD partition.  Hugh
 692	 */
 693
 694	si->flags += SWP_SCANNING;
 695	scan_base = offset = si->cluster_next;
 696
 697	/* SSD algorithm */
 698	if (si->cluster_info) {
 699		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 700			goto checks;
 701		else
 702			goto scan;
 703	}
 704
 705	if (unlikely(!si->cluster_nr--)) {
 706		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 707			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 708			goto checks;
 709		}
 710
 711		spin_unlock(&si->lock);
 712
 713		/*
 714		 * If seek is expensive, start searching for new cluster from
 715		 * start of partition, to minimize the span of allocated swap.
 716		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 717		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 718		 */
 719		scan_base = offset = si->lowest_bit;
 720		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 721
 722		/* Locate the first empty (unaligned) cluster */
 723		for (; last_in_cluster <= si->highest_bit; offset++) {
 724			if (si->swap_map[offset])
 725				last_in_cluster = offset + SWAPFILE_CLUSTER;
 726			else if (offset == last_in_cluster) {
 727				spin_lock(&si->lock);
 728				offset -= SWAPFILE_CLUSTER - 1;
 729				si->cluster_next = offset;
 730				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 731				goto checks;
 732			}
 733			if (unlikely(--latency_ration < 0)) {
 734				cond_resched();
 735				latency_ration = LATENCY_LIMIT;
 736			}
 737		}
 738
 739		offset = scan_base;
 740		spin_lock(&si->lock);
 741		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 742	}
 743
 744checks:
 745	if (si->cluster_info) {
 746		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
 747		/* take a break if we already got some slots */
 748			if (n_ret)
 749				goto done;
 750			if (!scan_swap_map_try_ssd_cluster(si, &offset,
 751							&scan_base))
 752				goto scan;
 753		}
 754	}
 755	if (!(si->flags & SWP_WRITEOK))
 756		goto no_page;
 757	if (!si->highest_bit)
 758		goto no_page;
 759	if (offset > si->highest_bit)
 760		scan_base = offset = si->lowest_bit;
 761
 762	ci = lock_cluster(si, offset);
 763	/* reuse swap entry of cache-only swap if not busy. */
 764	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 765		int swap_was_freed;
 766		unlock_cluster(ci);
 767		spin_unlock(&si->lock);
 768		swap_was_freed = __try_to_reclaim_swap(si, offset);
 769		spin_lock(&si->lock);
 770		/* entry was freed successfully, try to use this again */
 771		if (swap_was_freed)
 772			goto checks;
 773		goto scan; /* check next one */
 774	}
 775
 776	if (si->swap_map[offset]) {
 777		unlock_cluster(ci);
 778		if (!n_ret)
 779			goto scan;
 780		else
 781			goto done;
 782	}
 783	si->swap_map[offset] = usage;
 784	inc_cluster_info_page(si, si->cluster_info, offset);
 785	unlock_cluster(ci);
 786
 787	swap_range_alloc(si, offset, 1);
 788	si->cluster_next = offset + 1;
 789	slots[n_ret++] = swp_entry(si->type, offset);
 790
 791	/* got enough slots or reach max slots? */
 792	if ((n_ret == nr) || (offset >= si->highest_bit))
 793		goto done;
 794
 795	/* search for next available slot */
 796
 797	/* time to take a break? */
 798	if (unlikely(--latency_ration < 0)) {
 799		if (n_ret)
 800			goto done;
 801		spin_unlock(&si->lock);
 802		cond_resched();
 803		spin_lock(&si->lock);
 804		latency_ration = LATENCY_LIMIT;
 805	}
 806
 807	/* try to get more slots in cluster */
 808	if (si->cluster_info) {
 809		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 810			goto checks;
 811		else
 812			goto done;
 813	}
 814	/* non-ssd case */
 815	++offset;
 816
 817	/* non-ssd case, still more slots in cluster? */
 818	if (si->cluster_nr && !si->swap_map[offset]) {
 819		--si->cluster_nr;
 820		goto checks;
 821	}
 822
 823done:
 824	si->flags -= SWP_SCANNING;
 825	return n_ret;
 826
 827scan:
 828	spin_unlock(&si->lock);
 829	while (++offset <= si->highest_bit) {
 830		if (!si->swap_map[offset]) {
 831			spin_lock(&si->lock);
 832			goto checks;
 833		}
 834		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 835			spin_lock(&si->lock);
 836			goto checks;
 837		}
 838		if (unlikely(--latency_ration < 0)) {
 839			cond_resched();
 840			latency_ration = LATENCY_LIMIT;
 841		}
 842	}
 843	offset = si->lowest_bit;
 844	while (offset < scan_base) {
 845		if (!si->swap_map[offset]) {
 846			spin_lock(&si->lock);
 847			goto checks;
 848		}
 849		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 850			spin_lock(&si->lock);
 851			goto checks;
 852		}
 853		if (unlikely(--latency_ration < 0)) {
 854			cond_resched();
 855			latency_ration = LATENCY_LIMIT;
 856		}
 857		offset++;
 858	}
 859	spin_lock(&si->lock);
 860
 861no_page:
 862	si->flags -= SWP_SCANNING;
 863	return n_ret;
 864}
 865
 866#ifdef CONFIG_THP_SWAP
 867static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 868{
 869	unsigned long idx;
 870	struct swap_cluster_info *ci;
 871	unsigned long offset, i;
 872	unsigned char *map;
 873
 874	if (cluster_list_empty(&si->free_clusters))
 875		return 0;
 876
 877	idx = cluster_list_first(&si->free_clusters);
 878	offset = idx * SWAPFILE_CLUSTER;
 879	ci = lock_cluster(si, offset);
 880	alloc_cluster(si, idx);
 881	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
 882
 883	map = si->swap_map + offset;
 884	for (i = 0; i < SWAPFILE_CLUSTER; i++)
 885		map[i] = SWAP_HAS_CACHE;
 886	unlock_cluster(ci);
 887	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
 888	*slot = swp_entry(si->type, offset);
 889
 890	return 1;
 891}
 892
 893static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 894{
 895	unsigned long offset = idx * SWAPFILE_CLUSTER;
 896	struct swap_cluster_info *ci;
 897
 898	ci = lock_cluster(si, offset);
 899	cluster_set_count_flag(ci, 0, 0);
 900	free_cluster(si, idx);
 901	unlock_cluster(ci);
 902	swap_range_free(si, offset, SWAPFILE_CLUSTER);
 903}
 904#else
 905static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 906{
 907	VM_WARN_ON_ONCE(1);
 908	return 0;
 909}
 910#endif /* CONFIG_THP_SWAP */
 911
 912static unsigned long scan_swap_map(struct swap_info_struct *si,
 913				   unsigned char usage)
 914{
 915	swp_entry_t entry;
 916	int n_ret;
 917
 918	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
 919
 920	if (n_ret)
 921		return swp_offset(entry);
 922	else
 923		return 0;
 924
 925}
 926
 927int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
 928{
 929	unsigned long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
 930	struct swap_info_struct *si, *next;
 931	long avail_pgs;
 932	int n_ret = 0;
 933	int node;
 934
 935	/* Only single cluster request supported */
 936	WARN_ON_ONCE(n_goal > 1 && cluster);
 937
 938	avail_pgs = atomic_long_read(&nr_swap_pages) / nr_pages;
 939	if (avail_pgs <= 0)
 940		goto noswap;
 941
 942	if (n_goal > SWAP_BATCH)
 943		n_goal = SWAP_BATCH;
 944
 945	if (n_goal > avail_pgs)
 946		n_goal = avail_pgs;
 947
 948	atomic_long_sub(n_goal * nr_pages, &nr_swap_pages);
 949
 950	spin_lock(&swap_avail_lock);
 951
 952start_over:
 953	node = numa_node_id();
 954	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
 955		/* requeue si to after same-priority siblings */
 956		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
 957		spin_unlock(&swap_avail_lock);
 958		spin_lock(&si->lock);
 959		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
 960			spin_lock(&swap_avail_lock);
 961			if (plist_node_empty(&si->avail_lists[node])) {
 962				spin_unlock(&si->lock);
 963				goto nextsi;
 964			}
 965			WARN(!si->highest_bit,
 966			     "swap_info %d in list but !highest_bit\n",
 967			     si->type);
 968			WARN(!(si->flags & SWP_WRITEOK),
 969			     "swap_info %d in list but !SWP_WRITEOK\n",
 970			     si->type);
 971			__del_from_avail_list(si);
 972			spin_unlock(&si->lock);
 973			goto nextsi;
 974		}
 975		if (cluster) {
 976			if (!(si->flags & SWP_FILE))
 977				n_ret = swap_alloc_cluster(si, swp_entries);
 978		} else
 979			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
 980						    n_goal, swp_entries);
 981		spin_unlock(&si->lock);
 982		if (n_ret || cluster)
 983			goto check_out;
 984		pr_debug("scan_swap_map of si %d failed to find offset\n",
 985			si->type);
 986
 987		spin_lock(&swap_avail_lock);
 988nextsi:
 989		/*
 990		 * if we got here, it's likely that si was almost full before,
 991		 * and since scan_swap_map() can drop the si->lock, multiple
 992		 * callers probably all tried to get a page from the same si
 993		 * and it filled up before we could get one; or, the si filled
 994		 * up between us dropping swap_avail_lock and taking si->lock.
 995		 * Since we dropped the swap_avail_lock, the swap_avail_head
 996		 * list may have been modified; so if next is still in the
 997		 * swap_avail_head list then try it, otherwise start over
 998		 * if we have not gotten any slots.
 999		 */
1000		if (plist_node_empty(&next->avail_lists[node]))
1001			goto start_over;
1002	}
1003
1004	spin_unlock(&swap_avail_lock);
1005
1006check_out:
1007	if (n_ret < n_goal)
1008		atomic_long_add((long)(n_goal - n_ret) * nr_pages,
1009				&nr_swap_pages);
1010noswap:
1011	return n_ret;
1012}
1013
1014/* The only caller of this function is now suspend routine */
1015swp_entry_t get_swap_page_of_type(int type)
1016{
1017	struct swap_info_struct *si;
1018	pgoff_t offset;
1019
1020	si = swap_info[type];
1021	spin_lock(&si->lock);
1022	if (si && (si->flags & SWP_WRITEOK)) {
1023		atomic_long_dec(&nr_swap_pages);
1024		/* This is called for allocating swap entry, not cache */
1025		offset = scan_swap_map(si, 1);
1026		if (offset) {
1027			spin_unlock(&si->lock);
1028			return swp_entry(type, offset);
1029		}
1030		atomic_long_inc(&nr_swap_pages);
1031	}
1032	spin_unlock(&si->lock);
1033	return (swp_entry_t) {0};
1034}
1035
1036static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1037{
1038	struct swap_info_struct *p;
1039	unsigned long offset, type;
1040
1041	if (!entry.val)
1042		goto out;
1043	type = swp_type(entry);
1044	if (type >= nr_swapfiles)
1045		goto bad_nofile;
1046	p = swap_info[type];
1047	if (!(p->flags & SWP_USED))
1048		goto bad_device;
1049	offset = swp_offset(entry);
1050	if (offset >= p->max)
1051		goto bad_offset;
1052	return p;
1053
1054bad_offset:
1055	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
1056	goto out;
1057bad_device:
1058	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
1059	goto out;
1060bad_nofile:
1061	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
1062out:
1063	return NULL;
1064}
1065
1066static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1067{
1068	struct swap_info_struct *p;
1069
1070	p = __swap_info_get(entry);
1071	if (!p)
1072		goto out;
1073	if (!p->swap_map[swp_offset(entry)])
1074		goto bad_free;
1075	return p;
1076
1077bad_free:
1078	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
1079	goto out;
1080out:
1081	return NULL;
1082}
1083
1084static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1085{
1086	struct swap_info_struct *p;
1087
1088	p = _swap_info_get(entry);
1089	if (p)
1090		spin_lock(&p->lock);
1091	return p;
1092}
1093
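/*
 * Like swap_info_get(), but optimized for batched frees: if @entry belongs
 * to the same device as @q, that lock is simply kept; otherwise @q's lock is
 * dropped and the new device's lock is taken.
 */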
1094static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1095					struct swap_info_struct *q)
1096{
1097	struct swap_info_struct *p;
1098
1099	p = _swap_info_get(entry);
1100
1101	if (p != q) {
1102		if (q != NULL)
1103			spin_unlock(&q->lock);
1104		if (p != NULL)
1105			spin_lock(&p->lock);
1106	}
1107	return p;
1108}
1109
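/*
 * Drop one reference from @entry: a map reference when @usage is 1, or the
 * swap cache reference when @usage is SWAP_HAS_CACHE. Returns the remaining
 * usage byte; zero means the slot is unreferenced and the caller can hand it
 * to free_swap_slot().
 */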
1110static unsigned char __swap_entry_free(struct swap_info_struct *p,
1111				       swp_entry_t entry, unsigned char usage)
1112{
1113	struct swap_cluster_info *ci;
1114	unsigned long offset = swp_offset(entry);
1115	unsigned char count;
1116	unsigned char has_cache;
1117
1118	ci = lock_cluster_or_swap_info(p, offset);
1119
1120	count = p->swap_map[offset];
1121
1122	has_cache = count & SWAP_HAS_CACHE;
1123	count &= ~SWAP_HAS_CACHE;
1124
1125	if (usage == SWAP_HAS_CACHE) {
1126		VM_BUG_ON(!has_cache);
1127		has_cache = 0;
1128	} else if (count == SWAP_MAP_SHMEM) {
1129		/*
1130		 * Or we could insist on shmem.c using a special
1131		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1132		 */
1133		count = 0;
1134	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1135		if (count == COUNT_CONTINUED) {
1136			if (swap_count_continued(p, offset, count))
1137				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1138			else
1139				count = SWAP_MAP_MAX;
1140		} else
1141			count--;
1142	}
1143
1144	usage = count | has_cache;
1145	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
1146
1147	unlock_cluster_or_swap_info(p, ci);
1148
1149	return usage;
1150}
1151
1152static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1153{
1154	struct swap_cluster_info *ci;
1155	unsigned long offset = swp_offset(entry);
1156	unsigned char count;
1157
1158	ci = lock_cluster(p, offset);
1159	count = p->swap_map[offset];
1160	VM_BUG_ON(count != SWAP_HAS_CACHE);
1161	p->swap_map[offset] = 0;
1162	dec_cluster_info_page(p, p->cluster_info, offset);
1163	unlock_cluster(ci);
1164
1165	mem_cgroup_uncharge_swap(entry, 1);
1166	swap_range_free(p, offset, 1);
1167}
1168
1169/*
1170 * Caller has made sure that the swap device corresponding to entry
1171 * is still around or has not been recycled.
1172 */
1173void swap_free(swp_entry_t entry)
1174{
1175	struct swap_info_struct *p;
1176
1177	p = _swap_info_get(entry);
1178	if (p) {
1179		if (!__swap_entry_free(p, entry, 1))
1180			free_swap_slot(entry);
1181	}
1182}
1183
1184/*
1185 * Called after dropping swapcache to decrease refcnt to swap entries.
1186 */
1187static void swapcache_free(swp_entry_t entry)
1188{
1189	struct swap_info_struct *p;
1190
1191	p = _swap_info_get(entry);
1192	if (p) {
1193		if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
1194			free_swap_slot(entry);
1195	}
1196}
1197
1198#ifdef CONFIG_THP_SWAP
1199static void swapcache_free_cluster(swp_entry_t entry)
1200{
1201	unsigned long offset = swp_offset(entry);
1202	unsigned long idx = offset / SWAPFILE_CLUSTER;
1203	struct swap_cluster_info *ci;
1204	struct swap_info_struct *si;
1205	unsigned char *map;
1206	unsigned int i, free_entries = 0;
1207	unsigned char val;
1208
1209	si = _swap_info_get(entry);
1210	if (!si)
1211		return;
1212
1213	ci = lock_cluster(si, offset);
1214	VM_BUG_ON(!cluster_is_huge(ci));
1215	map = si->swap_map + offset;
1216	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1217		val = map[i];
1218		VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1219		if (val == SWAP_HAS_CACHE)
1220			free_entries++;
1221	}
1222	if (!free_entries) {
1223		for (i = 0; i < SWAPFILE_CLUSTER; i++)
1224			map[i] &= ~SWAP_HAS_CACHE;
1225	}
1226	cluster_clear_huge(ci);
1227	unlock_cluster(ci);
1228	if (free_entries == SWAPFILE_CLUSTER) {
1229		spin_lock(&si->lock);
1230		ci = lock_cluster(si, offset);
1231		memset(map, 0, SWAPFILE_CLUSTER);
1232		unlock_cluster(ci);
1233		mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1234		swap_free_cluster(si, idx);
1235		spin_unlock(&si->lock);
1236	} else if (free_entries) {
1237		for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
1238			if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
1239				free_swap_slot(entry);
1240		}
1241	}
1242}
1243
1244int split_swap_cluster(swp_entry_t entry)
1245{
1246	struct swap_info_struct *si;
1247	struct swap_cluster_info *ci;
1248	unsigned long offset = swp_offset(entry);
1249
1250	si = _swap_info_get(entry);
1251	if (!si)
1252		return -EBUSY;
1253	ci = lock_cluster(si, offset);
1254	cluster_clear_huge(ci);
1255	unlock_cluster(ci);
1256	return 0;
1257}
1258#else
1259static inline void swapcache_free_cluster(swp_entry_t entry)
1260{
1261}
1262#endif /* CONFIG_THP_SWAP */
1263
1264void put_swap_page(struct page *page, swp_entry_t entry)
1265{
1266	if (!PageTransHuge(page))
1267		swapcache_free(entry);
1268	else
1269		swapcache_free_cluster(entry);
1270}
1271
1272static int swp_entry_cmp(const void *ent1, const void *ent2)
1273{
1274	const swp_entry_t *e1 = ent1, *e2 = ent2;
1275
1276	return (int)swp_type(*e1) - (int)swp_type(*e2);
1277}
1278
1279void swapcache_free_entries(swp_entry_t *entries, int n)
1280{
1281	struct swap_info_struct *p, *prev;
1282	int i;
1283
1284	if (n <= 0)
1285		return;
1286
1287	prev = NULL;
1288	p = NULL;
1289
1290	/*
1291	 * Sort swap entries by swap device, so each lock is only taken once.
1292	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1293	 * so low that it isn't necessary to optimize further.
1294	 */
1295	if (nr_swapfiles > 1)
1296		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1297	for (i = 0; i < n; ++i) {
1298		p = swap_info_get_cont(entries[i], prev);
1299		if (p)
1300			swap_entry_free(p, entries[i]);
1301		prev = p;
1302	}
1303	if (p)
1304		spin_unlock(&p->lock);
1305}
1306
1307/*
1308 * How many references to page are currently swapped out?
1309 * This does not give an exact answer when swap count is continued,
1310 * but does include the high COUNT_CONTINUED flag to allow for that.
1311 */
1312int page_swapcount(struct page *page)
1313{
1314	int count = 0;
1315	struct swap_info_struct *p;
1316	struct swap_cluster_info *ci;
1317	swp_entry_t entry;
1318	unsigned long offset;
1319
1320	entry.val = page_private(page);
1321	p = _swap_info_get(entry);
1322	if (p) {
1323		offset = swp_offset(entry);
1324		ci = lock_cluster_or_swap_info(p, offset);
1325		count = swap_count(p->swap_map[offset]);
1326		unlock_cluster_or_swap_info(p, ci);
1327	}
1328	return count;
1329}
1330
1331int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
1332{
1333	pgoff_t offset = swp_offset(entry);
1334
1335	return swap_count(si->swap_map[offset]);
1336}
1337
1338static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1339{
1340	int count = 0;
1341	pgoff_t offset = swp_offset(entry);
1342	struct swap_cluster_info *ci;
1343
1344	ci = lock_cluster_or_swap_info(si, offset);
1345	count = swap_count(si->swap_map[offset]);
1346	unlock_cluster_or_swap_info(si, ci);
1347	return count;
1348}
1349
1350/*
1351 * How many references to @entry are currently swapped out?
1352 * This does not give an exact answer when swap count is continued,
1353 * but does include the high COUNT_CONTINUED flag to allow for that.
1354 */
1355int __swp_swapcount(swp_entry_t entry)
1356{
1357	int count = 0;
1358	struct swap_info_struct *si;
1359
1360	si = __swap_info_get(entry);
1361	if (si)
1362		count = swap_swapcount(si, entry);
1363	return count;
1364}
1365
1366/*
1367 * How many references to @entry are currently swapped out?
1368 * This considers COUNT_CONTINUED so it returns exact answer.
1369 */
1370int swp_swapcount(swp_entry_t entry)
1371{
1372	int count, tmp_count, n;
1373	struct swap_info_struct *p;
1374	struct swap_cluster_info *ci;
1375	struct page *page;
1376	pgoff_t offset;
1377	unsigned char *map;
1378
1379	p = _swap_info_get(entry);
1380	if (!p)
1381		return 0;
1382
1383	offset = swp_offset(entry);
1384
1385	ci = lock_cluster_or_swap_info(p, offset);
1386
1387	count = swap_count(p->swap_map[offset]);
1388	if (!(count & COUNT_CONTINUED))
1389		goto out;
1390
1391	count &= ~COUNT_CONTINUED;
1392	n = SWAP_MAP_MAX + 1;
1393
1394	page = vmalloc_to_page(p->swap_map + offset);
1395	offset &= ~PAGE_MASK;
1396	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1397
1398	do {
1399		page = list_next_entry(page, lru);
1400		map = kmap_atomic(page);
1401		tmp_count = map[offset];
1402		kunmap_atomic(map);
1403
1404		count += (tmp_count & ~COUNT_CONTINUED) * n;
1405		n *= (SWAP_CONT_MAX + 1);
1406	} while (tmp_count & COUNT_CONTINUED);
1407out:
1408	unlock_cluster_or_swap_info(p, ci);
1409	return count;
1410}
1411
1412#ifdef CONFIG_THP_SWAP
1413static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1414					 swp_entry_t entry)
1415{
1416	struct swap_cluster_info *ci;
1417	unsigned char *map = si->swap_map;
1418	unsigned long roffset = swp_offset(entry);
1419	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1420	int i;
1421	bool ret = false;
1422
1423	ci = lock_cluster_or_swap_info(si, offset);
1424	if (!ci || !cluster_is_huge(ci)) {
1425		if (map[roffset] != SWAP_HAS_CACHE)
1426			ret = true;
1427		goto unlock_out;
1428	}
1429	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1430		if (map[offset + i] != SWAP_HAS_CACHE) {
1431			ret = true;
1432			break;
1433		}
1434	}
1435unlock_out:
1436	unlock_cluster_or_swap_info(si, ci);
1437	return ret;
1438}
1439
1440static bool page_swapped(struct page *page)
1441{
1442	swp_entry_t entry;
1443	struct swap_info_struct *si;
1444
1445	if (likely(!PageTransCompound(page)))
1446		return page_swapcount(page) != 0;
1447
1448	page = compound_head(page);
1449	entry.val = page_private(page);
1450	si = _swap_info_get(entry);
1451	if (si)
1452		return swap_page_trans_huge_swapped(si, entry);
1453	return false;
1454}
1455
1456static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1457					 int *total_swapcount)
1458{
1459	int i, map_swapcount, _total_mapcount, _total_swapcount;
1460	unsigned long offset = 0;
1461	struct swap_info_struct *si;
1462	struct swap_cluster_info *ci = NULL;
1463	unsigned char *map = NULL;
1464	int mapcount, swapcount = 0;
1465
1466	/* hugetlbfs shouldn't call it */
1467	VM_BUG_ON_PAGE(PageHuge(page), page);
1468
1469	if (likely(!PageTransCompound(page))) {
1470		mapcount = atomic_read(&page->_mapcount) + 1;
1471		if (total_mapcount)
1472			*total_mapcount = mapcount;
1473		if (PageSwapCache(page))
1474			swapcount = page_swapcount(page);
1475		if (total_swapcount)
1476			*total_swapcount = swapcount;
1477		return mapcount + swapcount;
1478	}
1479
1480	page = compound_head(page);
1481
1482	_total_mapcount = _total_swapcount = map_swapcount = 0;
1483	if (PageSwapCache(page)) {
1484		swp_entry_t entry;
1485
1486		entry.val = page_private(page);
1487		si = _swap_info_get(entry);
1488		if (si) {
1489			map = si->swap_map;
1490			offset = swp_offset(entry);
1491		}
1492	}
1493	if (map)
1494		ci = lock_cluster(si, offset);
1495	for (i = 0; i < HPAGE_PMD_NR; i++) {
1496		mapcount = atomic_read(&page[i]._mapcount) + 1;
1497		_total_mapcount += mapcount;
1498		if (map) {
1499			swapcount = swap_count(map[offset + i]);
1500			_total_swapcount += swapcount;
1501		}
1502		map_swapcount = max(map_swapcount, mapcount + swapcount);
1503	}
1504	unlock_cluster(ci);
1505	if (PageDoubleMap(page)) {
1506		map_swapcount -= 1;
1507		_total_mapcount -= HPAGE_PMD_NR;
1508	}
1509	mapcount = compound_mapcount(page);
1510	map_swapcount += mapcount;
1511	_total_mapcount += mapcount;
1512	if (total_mapcount)
1513		*total_mapcount = _total_mapcount;
1514	if (total_swapcount)
1515		*total_swapcount = _total_swapcount;
1516
1517	return map_swapcount;
1518}
1519#else
1520#define swap_page_trans_huge_swapped(si, entry)	swap_swapcount(si, entry)
1521#define page_swapped(page)			(page_swapcount(page) != 0)
1522
1523static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1524					 int *total_swapcount)
1525{
1526	int mapcount, swapcount = 0;
1527
1528	/* hugetlbfs shouldn't call it */
1529	VM_BUG_ON_PAGE(PageHuge(page), page);
1530
1531	mapcount = page_trans_huge_mapcount(page, total_mapcount);
1532	if (PageSwapCache(page))
1533		swapcount = page_swapcount(page);
1534	if (total_swapcount)
1535		*total_swapcount = swapcount;
1536	return mapcount + swapcount;
1537}
1538#endif
1539
1540/*
1541 * We can write to an anon page without COW if there are no other references
1542 * to it.  And as a side-effect, free up its swap: because the old content
1543 * on disk will never be read, and seeking back there to write new content
1544 * later would only waste time away from clustering.
1545 *
1546 * NOTE: total_map_swapcount should not be relied upon by the caller if
1547 * reuse_swap_page() returns false, but it may be always overwritten
1548 * (see the other implementation for CONFIG_SWAP=n).
1549 */
1550bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1551{
1552	int count, total_mapcount, total_swapcount;
1553
1554	VM_BUG_ON_PAGE(!PageLocked(page), page);
1555	if (unlikely(PageKsm(page)))
1556		return false;
1557	count = page_trans_huge_map_swapcount(page, &total_mapcount,
1558					      &total_swapcount);
1559	if (total_map_swapcount)
1560		*total_map_swapcount = total_mapcount + total_swapcount;
1561	if (count == 1 && PageSwapCache(page) &&
1562	    (likely(!PageTransCompound(page)) ||
1563	     /* The remaining swap count will be freed soon */
1564	     total_swapcount == page_swapcount(page))) {
1565		if (!PageWriteback(page)) {
1566			page = compound_head(page);
1567			delete_from_swap_cache(page);
1568			SetPageDirty(page);
1569		} else {
1570			swp_entry_t entry;
1571			struct swap_info_struct *p;
1572
1573			entry.val = page_private(page);
1574			p = swap_info_get(entry);
1575			if (p->flags & SWP_STABLE_WRITES) {
1576				spin_unlock(&p->lock);
1577				return false;
1578			}
1579			spin_unlock(&p->lock);
1580		}
1581	}
1582
1583	return count <= 1;
1584}
1585
1586/*
1587 * If swap is getting full, or if there are no more mappings of this page,
1588 * then try_to_free_swap is called to free its swap space.
1589 */
1590int try_to_free_swap(struct page *page)
1591{
1592	VM_BUG_ON_PAGE(!PageLocked(page), page);
1593
1594	if (!PageSwapCache(page))
1595		return 0;
1596	if (PageWriteback(page))
1597		return 0;
1598	if (page_swapped(page))
1599		return 0;
1600
1601	/*
1602	 * Once hibernation has begun to create its image of memory,
1603	 * there's a danger that one of the calls to try_to_free_swap()
1604	 * - most probably a call from __try_to_reclaim_swap() while
1605	 * hibernation is allocating its own swap pages for the image,
1606	 * but conceivably even a call from memory reclaim - will free
1607	 * the swap from a page which has already been recorded in the
1608	 * image as a clean swapcache page, and then reuse its swap for
1609	 * another page of the image.  On waking from hibernation, the
1610	 * original page might be freed under memory pressure, then
1611	 * later read back in from swap, now with the wrong data.
1612	 *
1613	 * Hibernation suspends storage while it is writing the image
1614	 * to disk so check that here.
1615	 */
1616	if (pm_suspended_storage())
1617		return 0;
1618
1619	page = compound_head(page);
1620	delete_from_swap_cache(page);
1621	SetPageDirty(page);
1622	return 1;
1623}
1624
1625/*
1626 * Free the swap entry like above, but also try to
1627 * free the page cache entry if it is the last user.
1628 */
1629int free_swap_and_cache(swp_entry_t entry)
1630{
1631	struct swap_info_struct *p;
1632	struct page *page = NULL;
1633	unsigned char count;
1634
1635	if (non_swap_entry(entry))
1636		return 1;
1637
1638	p = _swap_info_get(entry);
1639	if (p) {
1640		count = __swap_entry_free(p, entry, 1);
1641		if (count == SWAP_HAS_CACHE &&
1642		    !swap_page_trans_huge_swapped(p, entry)) {
1643			page = find_get_page(swap_address_space(entry),
1644					     swp_offset(entry));
1645			if (page && !trylock_page(page)) {
1646				put_page(page);
1647				page = NULL;
1648			}
1649		} else if (!count)
1650			free_swap_slot(entry);
1651	}
1652	if (page) {
1653		/*
1654		 * Not mapped elsewhere, or swap space full? Free it!
1655		 * Also recheck PageSwapCache now page is locked (above).
1656		 */
1657		if (PageSwapCache(page) && !PageWriteback(page) &&
1658		    (!page_mapped(page) || mem_cgroup_swap_full(page)) &&
1659		    !swap_page_trans_huge_swapped(p, entry)) {
1660			page = compound_head(page);
1661			delete_from_swap_cache(page);
1662			SetPageDirty(page);
1663		}
1664		unlock_page(page);
1665		put_page(page);
1666	}
1667	return p != NULL;
1668}
1669
1670#ifdef CONFIG_HIBERNATION
1671/*
1672 * Find the swap type that corresponds to given device (if any).
1673 *
1674 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1675 * from 0, in which the swap header is expected to be located.
1676 *
1677 * This is needed for the suspend to disk (aka swsusp).
1678 */
1679int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1680{
1681	struct block_device *bdev = NULL;
1682	int type;
1683
1684	if (device)
1685		bdev = bdget(device);
1686
1687	spin_lock(&swap_lock);
1688	for (type = 0; type < nr_swapfiles; type++) {
1689		struct swap_info_struct *sis = swap_info[type];
1690
1691		if (!(sis->flags & SWP_WRITEOK))
1692			continue;
1693
1694		if (!bdev) {
1695			if (bdev_p)
1696				*bdev_p = bdgrab(sis->bdev);
1697
1698			spin_unlock(&swap_lock);
1699			return type;
1700		}
1701		if (bdev == sis->bdev) {
1702			struct swap_extent *se = &sis->first_swap_extent;
1703
1704			if (se->start_block == offset) {
1705				if (bdev_p)
1706					*bdev_p = bdgrab(sis->bdev);
1707
1708				spin_unlock(&swap_lock);
1709				bdput(bdev);
1710				return type;
1711			}
1712		}
1713	}
1714	spin_unlock(&swap_lock);
1715	if (bdev)
1716		bdput(bdev);
1717
1718	return -ENODEV;
1719}
1720
1721/*
1722 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1723 * corresponding to given index in swap_info (swap type).
1724 */
1725sector_t swapdev_block(int type, pgoff_t offset)
1726{
1727	struct block_device *bdev;
1728
1729	if ((unsigned int)type >= nr_swapfiles)
1730		return 0;
1731	if (!(swap_info[type]->flags & SWP_WRITEOK))
1732		return 0;
1733	return map_swap_entry(swp_entry(type, offset), &bdev);
1734}
1735
1736/*
1737 * Return either the total number of swap pages of given type, or the number
1738 * of free pages of that type (depending on @free)
1739 *
1740 * This is needed for software suspend
1741 */
1742unsigned int count_swap_pages(int type, int free)
1743{
1744	unsigned int n = 0;
1745
1746	spin_lock(&swap_lock);
1747	if ((unsigned int)type < nr_swapfiles) {
1748		struct swap_info_struct *sis = swap_info[type];
1749
1750		spin_lock(&sis->lock);
1751		if (sis->flags & SWP_WRITEOK) {
1752			n = sis->pages;
1753			if (free)
1754				n -= sis->inuse_pages;
1755		}
1756		spin_unlock(&sis->lock);
1757	}
1758	spin_unlock(&swap_lock);
1759	return n;
1760}
1761#endif /* CONFIG_HIBERNATION */
1762
1763static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1764{
1765	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
1766}
1767
1768/*
1769 * No need to decide whether this PTE shares the swap entry with others,
1770 * just let do_wp_page work it out if a write is requested later - to
1771 * force COW, vm_page_prot omits write permission from any private vma.
1772 */
1773static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1774		unsigned long addr, swp_entry_t entry, struct page *page)
1775{
1776	struct page *swapcache;
1777	struct mem_cgroup *memcg;
1778	spinlock_t *ptl;
1779	pte_t *pte;
1780	int ret = 1;
1781
1782	swapcache = page;
1783	page = ksm_might_need_to_copy(page, vma, addr);
1784	if (unlikely(!page))
1785		return -ENOMEM;
1786
1787	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
1788				&memcg, false)) {
1789		ret = -ENOMEM;
1790		goto out_nolock;
1791	}
1792
1793	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1794	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1795		mem_cgroup_cancel_charge(page, memcg, false);
1796		ret = 0;
1797		goto out;
1798	}
1799
1800	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1801	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1802	get_page(page);
1803	set_pte_at(vma->vm_mm, addr, pte,
1804		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1805	if (page == swapcache) {
1806		page_add_anon_rmap(page, vma, addr, false);
1807		mem_cgroup_commit_charge(page, memcg, true, false);
1808	} else { /* ksm created a completely new copy */
1809		page_add_new_anon_rmap(page, vma, addr, false);
1810		mem_cgroup_commit_charge(page, memcg, false, false);
1811		lru_cache_add_active_or_unevictable(page, vma);
1812	}
1813	swap_free(entry);
1814	/*
1815	 * Move the page to the active list so it is not
1816	 * immediately swapped out again after swapon.
1817	 */
1818	activate_page(page);
1819out:
1820	pte_unmap_unlock(pte, ptl);
1821out_nolock:
1822	if (page != swapcache) {
1823		unlock_page(page);
1824		put_page(page);
1825	}
1826	return ret;
1827}
1828
1829static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1830				unsigned long addr, unsigned long end,
1831				swp_entry_t entry, struct page *page)
1832{
1833	pte_t swp_pte = swp_entry_to_pte(entry);
1834	pte_t *pte;
1835	int ret = 0;
1836
1837	/*
1838	 * We don't actually need pte lock while scanning for swp_pte: since
1839	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
1840	 * page table while we're scanning; though it could get zapped, and on
1841	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
1842	 * of unmatched parts which look like swp_pte, so unuse_pte must
1843	 * recheck under pte lock.  Scanning without pte lock lets it be
1844	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
1845	 */
1846	pte = pte_offset_map(pmd, addr);
1847	do {
1848		/*
1849		 * swapoff spends a _lot_ of time in this loop!
1850		 * Test inline before going to call unuse_pte.
1851		 */
1852		if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
1853			pte_unmap(pte);
1854			ret = unuse_pte(vma, pmd, addr, entry, page);
1855			if (ret)
1856				goto out;
1857			pte = pte_offset_map(pmd, addr);
1858		}
1859	} while (pte++, addr += PAGE_SIZE, addr != end);
1860	pte_unmap(pte - 1);
1861out:
1862	return ret;
1863}
1864
1865static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1866				unsigned long addr, unsigned long end,
1867				swp_entry_t entry, struct page *page)
1868{
1869	pmd_t *pmd;
1870	unsigned long next;
1871	int ret;
1872
1873	pmd = pmd_offset(pud, addr);
1874	do {
1875		cond_resched();
1876		next = pmd_addr_end(addr, end);
1877		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1878			continue;
1879		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
1880		if (ret)
1881			return ret;
1882	} while (pmd++, addr = next, addr != end);
1883	return 0;
1884}
1885
1886static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
1887				unsigned long addr, unsigned long end,
1888				swp_entry_t entry, struct page *page)
1889{
1890	pud_t *pud;
1891	unsigned long next;
1892	int ret;
1893
1894	pud = pud_offset(p4d, addr);
1895	do {
1896		next = pud_addr_end(addr, end);
1897		if (pud_none_or_clear_bad(pud))
1898			continue;
1899		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
1900		if (ret)
1901			return ret;
1902	} while (pud++, addr = next, addr != end);
1903	return 0;
1904}
1905
1906static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
1907				unsigned long addr, unsigned long end,
1908				swp_entry_t entry, struct page *page)
1909{
1910	p4d_t *p4d;
1911	unsigned long next;
1912	int ret;
1913
1914	p4d = p4d_offset(pgd, addr);
1915	do {
1916		next = p4d_addr_end(addr, end);
1917		if (p4d_none_or_clear_bad(p4d))
1918			continue;
1919		ret = unuse_pud_range(vma, p4d, addr, next, entry, page);
1920		if (ret)
1921			return ret;
1922	} while (p4d++, addr = next, addr != end);
1923	return 0;
1924}
1925
1926static int unuse_vma(struct vm_area_struct *vma,
1927				swp_entry_t entry, struct page *page)
1928{
1929	pgd_t *pgd;
1930	unsigned long addr, end, next;
1931	int ret;
1932
1933	if (page_anon_vma(page)) {
1934		addr = page_address_in_vma(page, vma);
1935		if (addr == -EFAULT)
1936			return 0;
1937		else
1938			end = addr + PAGE_SIZE;
1939	} else {
1940		addr = vma->vm_start;
1941		end = vma->vm_end;
1942	}
1943
1944	pgd = pgd_offset(vma->vm_mm, addr);
1945	do {
1946		next = pgd_addr_end(addr, end);
1947		if (pgd_none_or_clear_bad(pgd))
1948			continue;
1949		ret = unuse_p4d_range(vma, pgd, addr, next, entry, page);
1950		if (ret)
1951			return ret;
1952	} while (pgd++, addr = next, addr != end);
1953	return 0;
1954}
1955
1956static int unuse_mm(struct mm_struct *mm,
1957				swp_entry_t entry, struct page *page)
1958{
1959	struct vm_area_struct *vma;
1960	int ret = 0;
1961
1962	if (!down_read_trylock(&mm->mmap_sem)) {
1963		/*
1964		 * Activate page so shrink_inactive_list is unlikely to unmap
1965		 * its ptes while lock is dropped, so swapoff can make progress.
1966		 */
1967		activate_page(page);
1968		unlock_page(page);
1969		down_read(&mm->mmap_sem);
1970		lock_page(page);
1971	}
1972	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1973		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1974			break;
1975		cond_resched();
1976	}
1977	up_read(&mm->mmap_sem);
1978	return (ret < 0)? ret: 0;
1979}
1980
1981/*
1982 * Scan swap_map (or frontswap_map if frontswap parameter is true)
1983 * from current position to next entry still in use.
1984 * Recycle to start on reaching the end, returning 0 when empty.
1985 */
1986static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1987					unsigned int prev, bool frontswap)
1988{
1989	unsigned int max = si->max;
1990	unsigned int i = prev;
1991	unsigned char count;
1992
1993	/*
1994	 * No need for swap_lock here: we're just looking
1995	 * for whether an entry is in use, not modifying it; false
1996	 * hits are okay, and sys_swapoff() has already prevented new
1997	 * allocations from this area (while holding swap_lock).
1998	 */
1999	for (;;) {
2000		if (++i >= max) {
2001			if (!prev) {
2002				i = 0;
2003				break;
2004			}
2005			/*
2006			 * No entries in use at top of swap_map,
2007			 * loop back to start and recheck there.
2008			 */
2009			max = prev + 1;
2010			prev = 0;
2011			i = 1;
2012		}
2013		count = READ_ONCE(si->swap_map[i]);
2014		if (count && swap_count(count) != SWAP_MAP_BAD)
2015			if (!frontswap || frontswap_test(si, i))
2016				break;
2017		if ((i % LATENCY_LIMIT) == 0)
2018			cond_resched();
2019	}
2020	return i;
2021}
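/*
 * Illustrative note (editor's sketch, not part of the kernel source):
 * with si->max == 8 and prev == 5, the scan above probes offsets 6 and 7,
 * then wraps (max = prev + 1, prev = 0, i = 1) and rechecks offsets 1..5;
 * only if none of those is in use does it return 0, which try_to_unuse()
 * below takes to mean "no entries left".
 */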
2022
2023/*
2024 * We completely avoid races by reading each swap page in advance,
2025 * and then search for the process using it.  All the necessary
2026 * page table adjustments can then be made atomically.
2027 *
2028 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
2029 * pages_to_unuse==0 means all pages; ignored if frontswap is false
2030 */
2031int try_to_unuse(unsigned int type, bool frontswap,
2032		 unsigned long pages_to_unuse)
2033{
2034	struct swap_info_struct *si = swap_info[type];
2035	struct mm_struct *start_mm;
2036	volatile unsigned char *swap_map; /* swap_map is accessed without
2037					   * locking. Mark it as volatile
2038					   * to prevent compiler doing
2039					   * something odd.
2040					   */
2041	unsigned char swcount;
2042	struct page *page;
2043	swp_entry_t entry;
2044	unsigned int i = 0;
2045	int retval = 0;
2046
2047	/*
2048	 * When searching mms for an entry, a good strategy is to
2049	 * start at the first mm we freed the previous entry from
2050	 * (though actually we don't notice whether we or coincidence
2051	 * freed the entry).  Initialize this start_mm with a hold.
2052	 *
2053	 * A simpler strategy would be to start at the last mm we
2054	 * freed the previous entry from; but that would take less
2055	 * advantage of mmlist ordering, which clusters forked mms
2056	 * together, child after parent.  If we race with dup_mmap(), we
2057	 * prefer to resolve parent before child, lest we miss entries
2058	 * duplicated after we scanned child: using last mm would invert
2059	 * that.
2060	 */
2061	start_mm = &init_mm;
2062	mmget(&init_mm);
2063
2064	/*
2065	 * Keep on scanning until all entries have gone.  Usually,
2066	 * one pass through swap_map is enough, but not necessarily:
2067	 * there are races when an instance of an entry might be missed.
2068	 */
2069	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
2070		if (signal_pending(current)) {
2071			retval = -EINTR;
2072			break;
2073		}
2074
2075		/*
2076		 * Get a page for the entry, using the existing swap
2077		 * cache page if there is one.  Otherwise, get a clean
2078		 * page and read the swap into it.
2079		 */
2080		swap_map = &si->swap_map[i];
2081		entry = swp_entry(type, i);
2082		page = read_swap_cache_async(entry,
2083					GFP_HIGHUSER_MOVABLE, NULL, 0, false);
2084		if (!page) {
2085			/*
2086			 * Either swap_duplicate() failed because entry
2087			 * has been freed independently, and will not be
2088			 * reused since sys_swapoff() already disabled
2089			 * allocation from here, or alloc_page() failed.
2090			 */
2091			swcount = *swap_map;
2092			/*
2093			 * We don't hold lock here, so the swap entry could be
2094			 * SWAP_MAP_BAD (when the cluster is discarding).
2095			 * Instead of failing out, we can just skip the swap
2096			 * entry because swapoff will wait for the discard to
2097			 * finish anyway.
2098			 */
2099			if (!swcount || swcount == SWAP_MAP_BAD)
2100				continue;
2101			retval = -ENOMEM;
2102			break;
2103		}
2104
2105		/*
2106		 * Don't hold on to start_mm if it looks like exiting.
2107		 */
2108		if (atomic_read(&start_mm->mm_users) == 1) {
2109			mmput(start_mm);
2110			start_mm = &init_mm;
2111			mmget(&init_mm);
2112		}
2113
2114		/*
2115		 * Wait for and lock page.  When do_swap_page races with
2116		 * try_to_unuse, do_swap_page can handle the fault much
2117		 * faster than try_to_unuse can locate the entry.  This
2118		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
2119		 * defer to do_swap_page in such a case - in some tests,
2120		 * do_swap_page and try_to_unuse repeatedly compete.
2121		 */
2122		wait_on_page_locked(page);
2123		wait_on_page_writeback(page);
2124		lock_page(page);
2125		wait_on_page_writeback(page);
2126
2127		/*
2128		 * Remove all references to entry.
2129		 */
2130		swcount = *swap_map;
2131		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
2132			retval = shmem_unuse(entry, page);
2133			/* page has already been unlocked and released */
2134			if (retval < 0)
2135				break;
2136			continue;
2137		}
2138		if (swap_count(swcount) && start_mm != &init_mm)
2139			retval = unuse_mm(start_mm, entry, page);
2140
2141		if (swap_count(*swap_map)) {
2142			int set_start_mm = (*swap_map >= swcount);
2143			struct list_head *p = &start_mm->mmlist;
2144			struct mm_struct *new_start_mm = start_mm;
2145			struct mm_struct *prev_mm = start_mm;
2146			struct mm_struct *mm;
2147
2148			mmget(new_start_mm);
2149			mmget(prev_mm);
2150			spin_lock(&mmlist_lock);
2151			while (swap_count(*swap_map) && !retval &&
2152					(p = p->next) != &start_mm->mmlist) {
2153				mm = list_entry(p, struct mm_struct, mmlist);
2154				if (!mmget_not_zero(mm))
2155					continue;
2156				spin_unlock(&mmlist_lock);
2157				mmput(prev_mm);
2158				prev_mm = mm;
2159
2160				cond_resched();
2161
2162				swcount = *swap_map;
2163				if (!swap_count(swcount)) /* any usage ? */
2164					;
2165				else if (mm == &init_mm)
2166					set_start_mm = 1;
2167				else
2168					retval = unuse_mm(mm, entry, page);
2169
2170				if (set_start_mm && *swap_map < swcount) {
2171					mmput(new_start_mm);
2172					mmget(mm);
2173					new_start_mm = mm;
2174					set_start_mm = 0;
2175				}
2176				spin_lock(&mmlist_lock);
2177			}
2178			spin_unlock(&mmlist_lock);
2179			mmput(prev_mm);
2180			mmput(start_mm);
2181			start_mm = new_start_mm;
2182		}
2183		if (retval) {
2184			unlock_page(page);
2185			put_page(page);
2186			break;
2187		}
2188
2189		/*
2190		 * If a reference remains (rare), we would like to leave
2191		 * the page in the swap cache; but try_to_unmap could
2192		 * then re-duplicate the entry once we drop page lock,
2193		 * so we might loop indefinitely; also, that page could
2194		 * not be swapped out to other storage meanwhile.  So:
2195		 * delete from cache even if there's another reference,
2196		 * after ensuring that the data has been saved to disk -
2197		 * since if the reference remains (rarer), it will be
2198		 * read from disk into another page.  Splitting into two
2199		 * pages would be incorrect if swap supported "shared
2200		 * private" pages, but they are handled by tmpfs files.
2201		 *
2202		 * Given how unuse_vma() targets one particular offset
2203		 * in an anon_vma, once the anon_vma has been determined,
2204		 * this splitting happens to be just what is needed to
2205		 * handle where KSM pages have been swapped out: re-reading
2206		 * is unnecessarily slow, but we can fix that later on.
2207		 */
2208		if (swap_count(*swap_map) &&
2209		     PageDirty(page) && PageSwapCache(page)) {
2210			struct writeback_control wbc = {
2211				.sync_mode = WB_SYNC_NONE,
2212			};
2213
2214			swap_writepage(compound_head(page), &wbc);
2215			lock_page(page);
2216			wait_on_page_writeback(page);
2217		}
2218
2219		/*
2220		 * It is conceivable that a racing task removed this page from
2221		 * swap cache just before we acquired the page lock at the top,
2222		 * or while we dropped it in unuse_mm().  The page might even
2223		 * be back in swap cache on another swap area: that we must not
2224		 * delete, since it may not have been written out to swap yet.
2225		 */
2226		if (PageSwapCache(page) &&
2227		    likely(page_private(page) == entry.val) &&
2228		    !page_swapped(page))
2229			delete_from_swap_cache(compound_head(page));
2230
2231		/*
2232		 * So that we could skip searching mms once the swap count
2233		 * went to 1, we did not mark any present ptes as dirty: we
2234		 * must mark the page dirty so shrink_page_list will preserve it.
2235		 */
2236		SetPageDirty(page);
2237		unlock_page(page);
2238		put_page(page);
2239
2240		/*
2241		 * Make sure that we aren't completely killing
2242		 * interactive performance.
2243		 */
2244		cond_resched();
2245		if (frontswap && pages_to_unuse > 0) {
2246			if (!--pages_to_unuse)
2247				break;
2248		}
2249	}
2250
2251	mmput(start_mm);
2252	return retval;
2253}
2254
2255/*
2256 * After a successful try_to_unuse, if no swap is now in use, we know
2257 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2258 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2259 * added to the mmlist just after swap_duplicate - before would be racy.
2260 */
2261static void drain_mmlist(void)
2262{
2263	struct list_head *p, *next;
2264	unsigned int type;
2265
2266	for (type = 0; type < nr_swapfiles; type++)
2267		if (swap_info[type]->inuse_pages)
2268			return;
2269	spin_lock(&mmlist_lock);
2270	list_for_each_safe(p, next, &init_mm.mmlist)
2271		list_del_init(p);
2272	spin_unlock(&mmlist_lock);
2273}
2274
2275/*
2276 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
2277 * corresponds to page offset for the specified swap entry.
2278 * Note that the return type of this function is sector_t, but it returns the
2279 * page offset into the bdev, not the sector offset.
2280 */
2281static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
2282{
2283	struct swap_info_struct *sis;
2284	struct swap_extent *start_se;
2285	struct swap_extent *se;
2286	pgoff_t offset;
2287
2288	sis = swap_info[swp_type(entry)];
2289	*bdev = sis->bdev;
2290
2291	offset = swp_offset(entry);
2292	start_se = sis->curr_swap_extent;
2293	se = start_se;
2294
2295	for ( ; ; ) {
2296		if (se->start_page <= offset &&
2297				offset < (se->start_page + se->nr_pages)) {
2298			return se->start_block + (offset - se->start_page);
2299		}
2300		se = list_next_entry(se, list);
2301		sis->curr_swap_extent = se;
2302		BUG_ON(se == start_se);		/* It *must* be present */
2303	}
2304}
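/*
 * Illustrative example (editor's sketch with hypothetical numbers): given
 * an extent se = { .start_page = 0, .nr_pages = 1024, .start_block = 8192 },
 *
 *	swp_entry_t entry = swp_entry(type, 100);
 *	sector_t b = map_swap_entry(entry, &bdev);
 *
 * returns b == 8192 + (100 - 0) == 8292, i.e. a PAGE_SIZE block index into
 * the bdev rather than a 512-byte sector number.
 */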
2305
2306/*
2307 * Returns the page offset into bdev for the specified page's swap entry.
2308 */
2309sector_t map_swap_page(struct page *page, struct block_device **bdev)
2310{
2311	swp_entry_t entry;
2312	entry.val = page_private(page);
2313	return map_swap_entry(entry, bdev);
2314}
2315
2316/*
2317 * Free all of a swapdev's extent information
2318 */
2319static void destroy_swap_extents(struct swap_info_struct *sis)
2320{
2321	while (!list_empty(&sis->first_swap_extent.list)) {
2322		struct swap_extent *se;
2323
2324		se = list_first_entry(&sis->first_swap_extent.list,
2325				struct swap_extent, list);
2326		list_del(&se->list);
2327		kfree(se);
2328	}
2329
2330	if (sis->flags & SWP_FILE) {
2331		struct file *swap_file = sis->swap_file;
2332		struct address_space *mapping = swap_file->f_mapping;
2333
2334		sis->flags &= ~SWP_FILE;
2335		mapping->a_ops->swap_deactivate(swap_file);
2336	}
2337}
2338
2339/*
2340 * Add a block range (and the corresponding page range) into this swapdev's
2341 * extent list.  The extent list is kept sorted in page order.
2342 *
2343 * This function rather assumes that it is called in ascending page order.
2344 */
2345int
2346add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2347		unsigned long nr_pages, sector_t start_block)
2348{
2349	struct swap_extent *se;
2350	struct swap_extent *new_se;
2351	struct list_head *lh;
2352
2353	if (start_page == 0) {
2354		se = &sis->first_swap_extent;
2355		sis->curr_swap_extent = se;
2356		se->start_page = 0;
2357		se->nr_pages = nr_pages;
2358		se->start_block = start_block;
2359		return 1;
2360	} else {
2361		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
2362		se = list_entry(lh, struct swap_extent, list);
2363		BUG_ON(se->start_page + se->nr_pages != start_page);
2364		if (se->start_block + se->nr_pages == start_block) {
2365			/* Merge it */
2366			se->nr_pages += nr_pages;
2367			return 0;
2368		}
2369	}
2370
2371	/*
2372	 * No merge.  Insert a new extent, preserving ordering.
2373	 */
2374	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2375	if (new_se == NULL)
2376		return -ENOMEM;
2377	new_se->start_page = start_page;
2378	new_se->nr_pages = nr_pages;
2379	new_se->start_block = start_block;
2380
2381	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
2382	return 1;
2383}
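/*
 * Illustrative example (editor's sketch, hypothetical block numbers) of how
 * consecutive calls from setup_swap_extents() either merge or append:
 *
 *	add_swap_extent(sis, 0,   256, 1000);	// first extent, returns 1
 *	add_swap_extent(sis, 256, 128, 1256);	// 1000 + 256 == 1256: merged, returns 0
 *	add_swap_extent(sis, 384,  64, 5000);	// discontiguous: new extent, returns 1
 */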
2384
2385/*
2386 * A `swap extent' is a simple thing which maps a contiguous range of pages
2387 * onto a contiguous range of disk blocks.  An ordered list of swap extents
2388 * is built at swapon time and is then used at swap_writepage/swap_readpage
2389 * time for locating where on disk a page belongs.
2390 *
2391 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2392 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2393 * swap files identically.
2394 *
2395 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2396 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2397 * swapfiles are handled *identically* after swapon time.
2398 *
2399 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2400 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2401 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2402 * requirements, they are simply tossed out - we will never use those blocks
2403 * for swapping.
2404 *
2405 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
2406 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
2407 * which will scribble on the fs.
2408 *
2409 * The amount of disk space which a single swap extent represents varies.
2410 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2411 * extents in the list.  To avoid much list walking, we cache the previous
2412 * search location in `curr_swap_extent', and start new searches from there.
2413 * This is extremely effective.  The average number of iterations in
2414 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
2415 */
2416static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2417{
2418	struct file *swap_file = sis->swap_file;
2419	struct address_space *mapping = swap_file->f_mapping;
2420	struct inode *inode = mapping->host;
2421	int ret;
2422
2423	if (S_ISBLK(inode->i_mode)) {
2424		ret = add_swap_extent(sis, 0, sis->max, 0);
2425		*span = sis->pages;
2426		return ret;
2427	}
2428
2429	if (mapping->a_ops->swap_activate) {
2430		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2431		if (!ret) {
2432			sis->flags |= SWP_FILE;
2433			ret = add_swap_extent(sis, 0, sis->max, 0);
2434			*span = sis->pages;
2435		}
2436		return ret;
2437	}
2438
2439	return generic_swapfile_activate(sis, swap_file, span);
2440}
2441
2442static int swap_node(struct swap_info_struct *p)
2443{
2444	struct block_device *bdev;
2445
2446	if (p->bdev)
2447		bdev = p->bdev;
2448	else
2449		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2450
2451	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2452}
2453
2454static void _enable_swap_info(struct swap_info_struct *p, int prio,
2455				unsigned char *swap_map,
2456				struct swap_cluster_info *cluster_info)
2457{
2458	int i;
2459
2460	if (prio >= 0)
2461		p->prio = prio;
2462	else
2463		p->prio = --least_priority;
2464	/*
2465	 * the plist prio is negated because plist ordering is
2466	 * low-to-high, while swap ordering is high-to-low
2467	 */
2468	p->list.prio = -p->prio;
2469	for_each_node(i) {
2470		if (p->prio >= 0)
2471			p->avail_lists[i].prio = -p->prio;
2472		else {
2473			if (swap_node(p) == i)
2474				p->avail_lists[i].prio = 1;
2475			else
2476				p->avail_lists[i].prio = -p->prio;
2477		}
2478	}
2479	p->swap_map = swap_map;
2480	p->cluster_info = cluster_info;
2481	p->flags |= SWP_WRITEOK;
2482	atomic_long_add(p->pages, &nr_swap_pages);
2483	total_swap_pages += p->pages;
2484
2485	assert_spin_locked(&swap_lock);
2486	/*
2487	 * both lists are plists, and thus priority ordered.
2488	 * swap_active_head needs to be priority ordered for swapoff(),
2489	 * which on removal of any swap_info_struct with an auto-assigned
2490	 * (i.e. negative) priority increments the auto-assigned priority
2491	 * of any lower-priority swap_info_structs.
2492	 * swap_avail_head needs to be priority ordered for get_swap_page(),
2493	 * which allocates swap pages from the highest available priority
2494	 * swap_info_struct.
2495	 */
2496	plist_add(&p->list, &swap_active_head);
2497	add_to_avail_list(p);
2498}
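/*
 * Illustrative note (editor's sketch): with an explicit swapon priority of 5,
 * p->prio == 5 and every plist node gets prio -5 (plists sort low-to-high,
 * swap priorities high-to-low).  With an auto-assigned priority such as -2,
 * the avail_lists nodes get prio 2, except on the NUMA node backing the
 * device (swap_node(p)), where prio 1 makes this device the preferred choice
 * for allocations from that node.
 */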
2499
2500static void enable_swap_info(struct swap_info_struct *p, int prio,
2501				unsigned char *swap_map,
2502				struct swap_cluster_info *cluster_info,
2503				unsigned long *frontswap_map)
2504{
2505	frontswap_init(p->type, frontswap_map);
2506	spin_lock(&swap_lock);
2507	spin_lock(&p->lock);
2508	 _enable_swap_info(p, prio, swap_map, cluster_info);
2509	spin_unlock(&p->lock);
2510	spin_unlock(&swap_lock);
2511}
2512
2513static void reinsert_swap_info(struct swap_info_struct *p)
2514{
2515	spin_lock(&swap_lock);
2516	spin_lock(&p->lock);
2517	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2518	spin_unlock(&p->lock);
2519	spin_unlock(&swap_lock);
2520}
2521
2522bool has_usable_swap(void)
2523{
2524	bool ret = true;
2525
2526	spin_lock(&swap_lock);
2527	if (plist_head_empty(&swap_active_head))
2528		ret = false;
2529	spin_unlock(&swap_lock);
2530	return ret;
2531}
2532
2533SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2534{
2535	struct swap_info_struct *p = NULL;
2536	unsigned char *swap_map;
2537	struct swap_cluster_info *cluster_info;
2538	unsigned long *frontswap_map;
2539	struct file *swap_file, *victim;
2540	struct address_space *mapping;
2541	struct inode *inode;
2542	struct filename *pathname;
2543	int err, found = 0;
2544	unsigned int old_block_size;
2545
2546	if (!capable(CAP_SYS_ADMIN))
2547		return -EPERM;
2548
2549	BUG_ON(!current->mm);
2550
2551	pathname = getname(specialfile);
2552	if (IS_ERR(pathname))
2553		return PTR_ERR(pathname);
2554
2555	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2556	err = PTR_ERR(victim);
2557	if (IS_ERR(victim))
2558		goto out;
2559
2560	mapping = victim->f_mapping;
2561	spin_lock(&swap_lock);
2562	plist_for_each_entry(p, &swap_active_head, list) {
2563		if (p->flags & SWP_WRITEOK) {
2564			if (p->swap_file->f_mapping == mapping) {
2565				found = 1;
2566				break;
2567			}
2568		}
2569	}
2570	if (!found) {
2571		err = -EINVAL;
2572		spin_unlock(&swap_lock);
2573		goto out_dput;
2574	}
2575	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2576		vm_unacct_memory(p->pages);
2577	else {
2578		err = -ENOMEM;
2579		spin_unlock(&swap_lock);
2580		goto out_dput;
2581	}
2582	del_from_avail_list(p);
2583	spin_lock(&p->lock);
2584	if (p->prio < 0) {
2585		struct swap_info_struct *si = p;
2586		int nid;
2587
2588		plist_for_each_entry_continue(si, &swap_active_head, list) {
2589			si->prio++;
2590			si->list.prio--;
2591			for_each_node(nid) {
2592				if (si->avail_lists[nid].prio != 1)
2593					si->avail_lists[nid].prio--;
2594			}
2595		}
2596		least_priority++;
2597	}
2598	plist_del(&p->list, &swap_active_head);
2599	atomic_long_sub(p->pages, &nr_swap_pages);
2600	total_swap_pages -= p->pages;
2601	p->flags &= ~SWP_WRITEOK;
2602	spin_unlock(&p->lock);
2603	spin_unlock(&swap_lock);
2604
2605	disable_swap_slots_cache_lock();
2606
2607	set_current_oom_origin();
2608	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2609	clear_current_oom_origin();
2610
2611	if (err) {
2612		/* re-insert swap space back into swap_list */
2613		reinsert_swap_info(p);
2614		reenable_swap_slots_cache_unlock();
2615		goto out_dput;
2616	}
2617
2618	reenable_swap_slots_cache_unlock();
2619
2620	flush_work(&p->discard_work);
2621
2622	destroy_swap_extents(p);
2623	if (p->flags & SWP_CONTINUED)
2624		free_swap_count_continuations(p);
2625
2626	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2627		atomic_dec(&nr_rotate_swap);
2628
2629	mutex_lock(&swapon_mutex);
2630	spin_lock(&swap_lock);
2631	spin_lock(&p->lock);
2632	drain_mmlist();
2633
2634	/* wait for anyone still in scan_swap_map */
2635	p->highest_bit = 0;		/* cuts scans short */
2636	while (p->flags >= SWP_SCANNING) {
2637		spin_unlock(&p->lock);
2638		spin_unlock(&swap_lock);
2639		schedule_timeout_uninterruptible(1);
2640		spin_lock(&swap_lock);
2641		spin_lock(&p->lock);
2642	}
2643
2644	swap_file = p->swap_file;
2645	old_block_size = p->old_block_size;
2646	p->swap_file = NULL;
2647	p->max = 0;
2648	swap_map = p->swap_map;
2649	p->swap_map = NULL;
2650	cluster_info = p->cluster_info;
2651	p->cluster_info = NULL;
2652	frontswap_map = frontswap_map_get(p);
2653	spin_unlock(&p->lock);
2654	spin_unlock(&swap_lock);
2655	frontswap_invalidate_area(p->type);
2656	frontswap_map_set(p, NULL);
2657	mutex_unlock(&swapon_mutex);
2658	free_percpu(p->percpu_cluster);
2659	p->percpu_cluster = NULL;
2660	vfree(swap_map);
2661	kvfree(cluster_info);
2662	kvfree(frontswap_map);
2663	/* Destroy swap account information */
2664	swap_cgroup_swapoff(p->type);
2665	exit_swap_address_space(p->type);
2666
2667	inode = mapping->host;
2668	if (S_ISBLK(inode->i_mode)) {
2669		struct block_device *bdev = I_BDEV(inode);
2670		set_blocksize(bdev, old_block_size);
2671		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2672	} else {
2673		inode_lock(inode);
2674		inode->i_flags &= ~S_SWAPFILE;
2675		inode_unlock(inode);
2676	}
2677	filp_close(swap_file, NULL);
2678
2679	/*
2680	 * Clear the SWP_USED flag after all resources are freed so that swapon
2681	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2682	 * not hold p->lock after we cleared its SWP_WRITEOK.
2683	 */
2684	spin_lock(&swap_lock);
2685	p->flags = 0;
2686	spin_unlock(&swap_lock);
2687
2688	err = 0;
2689	atomic_inc(&proc_poll_event);
2690	wake_up_interruptible(&proc_poll_wait);
2691
2692out_dput:
2693	filp_close(victim, NULL);
2694out:
2695	putname(pathname);
2696	return err;
2697}
2698
2699#ifdef CONFIG_PROC_FS
2700static __poll_t swaps_poll(struct file *file, poll_table *wait)
2701{
2702	struct seq_file *seq = file->private_data;
2703
2704	poll_wait(file, &proc_poll_wait, wait);
2705
2706	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2707		seq->poll_event = atomic_read(&proc_poll_event);
2708		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2709	}
2710
2711	return EPOLLIN | EPOLLRDNORM;
2712}
2713
2714/* iterator */
2715static void *swap_start(struct seq_file *swap, loff_t *pos)
2716{
2717	struct swap_info_struct *si;
2718	int type;
2719	loff_t l = *pos;
2720
2721	mutex_lock(&swapon_mutex);
2722
2723	if (!l)
2724		return SEQ_START_TOKEN;
2725
2726	for (type = 0; type < nr_swapfiles; type++) {
2727		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2728		si = swap_info[type];
2729		if (!(si->flags & SWP_USED) || !si->swap_map)
2730			continue;
2731		if (!--l)
2732			return si;
2733	}
2734
2735	return NULL;
2736}
2737
2738static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2739{
2740	struct swap_info_struct *si = v;
2741	int type;
2742
2743	if (v == SEQ_START_TOKEN)
2744		type = 0;
2745	else
2746		type = si->type + 1;
2747
2748	for (; type < nr_swapfiles; type++) {
2749		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2750		si = swap_info[type];
2751		if (!(si->flags & SWP_USED) || !si->swap_map)
2752			continue;
2753		++*pos;
2754		return si;
2755	}
2756
2757	return NULL;
2758}
2759
2760static void swap_stop(struct seq_file *swap, void *v)
2761{
2762	mutex_unlock(&swapon_mutex);
2763}
2764
2765static int swap_show(struct seq_file *swap, void *v)
2766{
2767	struct swap_info_struct *si = v;
2768	struct file *file;
2769	int len;
2770
2771	if (si == SEQ_START_TOKEN) {
2772		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2773		return 0;
2774	}
2775
2776	file = si->swap_file;
2777	len = seq_file_path(swap, file, " \t\n\\");
2778	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2779			len < 40 ? 40 - len : 1, " ",
2780			S_ISBLK(file_inode(file)->i_mode) ?
2781				"partition" : "file\t",
2782			si->pages << (PAGE_SHIFT - 10),
2783			si->inuse_pages << (PAGE_SHIFT - 10),
2784			si->prio);
2785	return 0;
2786}
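/*
 * Illustrative /proc/swaps output from the above (hypothetical values; the
 * Size and Used columns are in KiB, i.e. pages << (PAGE_SHIFT - 10)):
 *
 *	Filename				Type		Size	Used	Priority
 *	/dev/sda2                               partition	8388604	1024	-2
 */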
2787
2788static const struct seq_operations swaps_op = {
2789	.start =	swap_start,
2790	.next =		swap_next,
2791	.stop =		swap_stop,
2792	.show =		swap_show
2793};
2794
2795static int swaps_open(struct inode *inode, struct file *file)
2796{
2797	struct seq_file *seq;
2798	int ret;
2799
2800	ret = seq_open(file, &swaps_op);
2801	if (ret)
2802		return ret;
2803
2804	seq = file->private_data;
2805	seq->poll_event = atomic_read(&proc_poll_event);
2806	return 0;
2807}
2808
2809static const struct file_operations proc_swaps_operations = {
2810	.open		= swaps_open,
2811	.read		= seq_read,
2812	.llseek		= seq_lseek,
2813	.release	= seq_release,
2814	.poll		= swaps_poll,
2815};
2816
2817static int __init procswaps_init(void)
2818{
2819	proc_create("swaps", 0, NULL, &proc_swaps_operations);
2820	return 0;
2821}
2822__initcall(procswaps_init);
2823#endif /* CONFIG_PROC_FS */
2824
2825#ifdef MAX_SWAPFILES_CHECK
2826static int __init max_swapfiles_check(void)
2827{
2828	MAX_SWAPFILES_CHECK();
2829	return 0;
2830}
2831late_initcall(max_swapfiles_check);
2832#endif
2833
2834static struct swap_info_struct *alloc_swap_info(void)
2835{
2836	struct swap_info_struct *p;
2837	unsigned int type;
2838	int i;
2839
2840	p = kzalloc(sizeof(*p), GFP_KERNEL);
2841	if (!p)
2842		return ERR_PTR(-ENOMEM);
2843
2844	spin_lock(&swap_lock);
2845	for (type = 0; type < nr_swapfiles; type++) {
2846		if (!(swap_info[type]->flags & SWP_USED))
2847			break;
2848	}
2849	if (type >= MAX_SWAPFILES) {
2850		spin_unlock(&swap_lock);
2851		kfree(p);
2852		return ERR_PTR(-EPERM);
2853	}
2854	if (type >= nr_swapfiles) {
2855		p->type = type;
2856		swap_info[type] = p;
2857		/*
2858		 * Write swap_info[type] before nr_swapfiles, in case a
2859		 * racing procfs swap_start() or swap_next() is reading them.
2860		 * (We never shrink nr_swapfiles, we never free this entry.)
2861		 */
2862		smp_wmb();
2863		nr_swapfiles++;
2864	} else {
2865		kfree(p);
2866		p = swap_info[type];
2867		/*
2868		 * Do not memset this entry: a racing procfs swap_next()
2869		 * would be relying on p->type to remain valid.
2870		 */
2871	}
2872	INIT_LIST_HEAD(&p->first_swap_extent.list);
2873	plist_node_init(&p->list, 0);
2874	for_each_node(i)
2875		plist_node_init(&p->avail_lists[i], 0);
2876	p->flags = SWP_USED;
2877	spin_unlock(&swap_lock);
2878	spin_lock_init(&p->lock);
2879	spin_lock_init(&p->cont_lock);
2880
2881	return p;
2882}
2883
2884static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2885{
2886	int error;
2887
2888	if (S_ISBLK(inode->i_mode)) {
2889		p->bdev = bdgrab(I_BDEV(inode));
2890		error = blkdev_get(p->bdev,
2891				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2892		if (error < 0) {
2893			p->bdev = NULL;
2894			return error;
2895		}
2896		p->old_block_size = block_size(p->bdev);
2897		error = set_blocksize(p->bdev, PAGE_SIZE);
2898		if (error < 0)
2899			return error;
2900		p->flags |= SWP_BLKDEV;
2901	} else if (S_ISREG(inode->i_mode)) {
2902		p->bdev = inode->i_sb->s_bdev;
2903		inode_lock(inode);
2904		if (IS_SWAPFILE(inode))
2905			return -EBUSY;
2906	} else
2907		return -EINVAL;
2908
2909	return 0;
2910}
2911
2912static unsigned long read_swap_header(struct swap_info_struct *p,
2913					union swap_header *swap_header,
2914					struct inode *inode)
2915{
2916	int i;
2917	unsigned long maxpages;
2918	unsigned long swapfilepages;
2919	unsigned long last_page;
2920
2921	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2922		pr_err("Unable to find swap-space signature\n");
2923		return 0;
2924	}
2925
2926	/* swap partition endianness hack... */
2927	if (swab32(swap_header->info.version) == 1) {
2928		swab32s(&swap_header->info.version);
2929		swab32s(&swap_header->info.last_page);
2930		swab32s(&swap_header->info.nr_badpages);
2931		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2932			return 0;
2933		for (i = 0; i < swap_header->info.nr_badpages; i++)
2934			swab32s(&swap_header->info.badpages[i]);
2935	}
2936	/* Check the swap header's sub-version */
2937	if (swap_header->info.version != 1) {
2938		pr_warn("Unable to handle swap header version %d\n",
2939			swap_header->info.version);
2940		return 0;
2941	}
2942
2943	p->lowest_bit  = 1;
2944	p->cluster_next = 1;
2945	p->cluster_nr = 0;
2946
2947	/*
2948	 * Find out how many pages are allowed for a single swap
2949	 * device. There are two limiting factors: 1) the number
2950	 * of bits for the swap offset in the swp_entry_t type, and
2951	 * 2) the number of bits in the swap pte as defined by the
2952	 * different architectures. In order to find the
2953	 * largest possible bit mask, a swap entry with swap type 0
2954	 * and swap offset ~0UL is created, encoded to a swap pte,
2955	 * decoded to a swp_entry_t again, and finally the swap
2956	 * offset is extracted. This will mask all the bits from
2957	 * the initial ~0UL mask that can't be encoded in either
2958	 * the swp_entry_t or the architecture definition of a
2959	 * swap pte.
2960	 */
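	/*
	 * Illustrative example (editor's note): if the architecture's swap
	 * pte can only encode a 32-bit offset, the round trip below turns
	 * ~0UL into 0xffffffff, so maxpages becomes 2^32 and any larger
	 * last_page from the header is truncated further down.
	 */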
2961	maxpages = swp_offset(pte_to_swp_entry(
2962			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2963	last_page = swap_header->info.last_page;
2964	if (!last_page) {
2965		pr_warn("Empty swap-file\n");
2966		return 0;
2967	}
2968	if (last_page > maxpages) {
2969		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2970			maxpages << (PAGE_SHIFT - 10),
2971			last_page << (PAGE_SHIFT - 10));
2972	}
2973	if (maxpages > last_page) {
2974		maxpages = last_page + 1;
2975		/* p->max is an unsigned int: don't overflow it */
2976		if ((unsigned int)maxpages == 0)
2977			maxpages = UINT_MAX;
2978	}
2979	p->highest_bit = maxpages - 1;
2980
2981	if (!maxpages)
2982		return 0;
2983	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2984	if (swapfilepages && maxpages > swapfilepages) {
2985		pr_warn("Swap area shorter than signature indicates\n");
2986		return 0;
2987	}
2988	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2989		return 0;
2990	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2991		return 0;
2992
2993	return maxpages;
2994}
2995
2996#define SWAP_CLUSTER_INFO_COLS						\
2997	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
2998#define SWAP_CLUSTER_SPACE_COLS						\
2999	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3000#define SWAP_CLUSTER_COLS						\
3001	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3002
3003static int setup_swap_map_and_extents(struct swap_info_struct *p,
3004					union swap_header *swap_header,
3005					unsigned char *swap_map,
3006					struct swap_cluster_info *cluster_info,
3007					unsigned long maxpages,
3008					sector_t *span)
3009{
3010	unsigned int j, k;
3011	unsigned int nr_good_pages;
3012	int nr_extents;
3013	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3014	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3015	unsigned long i, idx;
3016
3017	nr_good_pages = maxpages - 1;	/* omit header page */
3018
3019	cluster_list_init(&p->free_clusters);
3020	cluster_list_init(&p->discard_clusters);
3021
3022	for (i = 0; i < swap_header->info.nr_badpages; i++) {
3023		unsigned int page_nr = swap_header->info.badpages[i];
3024		if (page_nr == 0 || page_nr > swap_header->info.last_page)
3025			return -EINVAL;
3026		if (page_nr < maxpages) {
3027			swap_map[page_nr] = SWAP_MAP_BAD;
3028			nr_good_pages--;
3029			/*
3030			 * Haven't marked the cluster free yet, no list
3031			 * operation involved
3032			 */
3033			inc_cluster_info_page(p, cluster_info, page_nr);
3034		}
3035	}
3036
3037	/* Haven't marked the cluster free yet, no list operation involved */
3038	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3039		inc_cluster_info_page(p, cluster_info, i);
3040
3041	if (nr_good_pages) {
3042		swap_map[0] = SWAP_MAP_BAD;
3043		/*
3044		 * Haven't marked the cluster free yet, no list
3045		 * operation involved
3046		 */
3047		inc_cluster_info_page(p, cluster_info, 0);
3048		p->max = maxpages;
3049		p->pages = nr_good_pages;
3050		nr_extents = setup_swap_extents(p, span);
3051		if (nr_extents < 0)
3052			return nr_extents;
3053		nr_good_pages = p->pages;
3054	}
3055	if (!nr_good_pages) {
3056		pr_warn("Empty swap-file\n");
3057		return -EINVAL;
3058	}
3059
3060	if (!cluster_info)
3061		return nr_extents;
3062
3063
3064	/*
3065	 * Reduce false cache line sharing between cluster_info entries
3066	 * that share the same address space.
3067	 */
3068	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3069		j = (k + col) % SWAP_CLUSTER_COLS;
3070		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3071			idx = i * SWAP_CLUSTER_COLS + j;
3072			if (idx >= nr_clusters)
3073				continue;
3074			if (cluster_count(&cluster_info[idx]))
3075				continue;
3076			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3077			cluster_list_add_tail(&p->free_clusters, cluster_info,
3078					      idx);
3079		}
3080	}
3081	return nr_extents;
3082}
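/*
 * Illustrative note (editor's sketch): with SWAP_CLUSTER_COLS == 4,
 * nr_clusters == 8 and col == 0, the interleaving above adds free clusters
 * to p->free_clusters in the order 0, 4, 1, 5, 2, 6, 3, 7, so that
 * neighbours on the free list do not sit in the same cache line of
 * cluster_info entries.
 */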
3083
3084/*
3085 * Helper to sys_swapon determining if a given swap
3086 * backing device queue supports DISCARD operations.
3087 */
3088static bool swap_discardable(struct swap_info_struct *si)
3089{
3090	struct request_queue *q = bdev_get_queue(si->bdev);
3091
3092	if (!q || !blk_queue_discard(q))
3093		return false;
3094
3095	return true;
3096}
3097
3098SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3099{
3100	struct swap_info_struct *p;
3101	struct filename *name;
3102	struct file *swap_file = NULL;
3103	struct address_space *mapping;
3104	int prio;
3105	int error;
3106	union swap_header *swap_header;
3107	int nr_extents;
3108	sector_t span;
3109	unsigned long maxpages;
3110	unsigned char *swap_map = NULL;
3111	struct swap_cluster_info *cluster_info = NULL;
3112	unsigned long *frontswap_map = NULL;
3113	struct page *page = NULL;
3114	struct inode *inode = NULL;
3115	bool inced_nr_rotate_swap = false;
3116
3117	if (swap_flags & ~SWAP_FLAGS_VALID)
3118		return -EINVAL;
3119
3120	if (!capable(CAP_SYS_ADMIN))
3121		return -EPERM;
3122
3123	if (!swap_avail_heads)
3124		return -ENOMEM;
3125
3126	p = alloc_swap_info();
3127	if (IS_ERR(p))
3128		return PTR_ERR(p);
3129
3130	INIT_WORK(&p->discard_work, swap_discard_work);
3131
3132	name = getname(specialfile);
3133	if (IS_ERR(name)) {
3134		error = PTR_ERR(name);
3135		name = NULL;
3136		goto bad_swap;
3137	}
3138	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3139	if (IS_ERR(swap_file)) {
3140		error = PTR_ERR(swap_file);
3141		swap_file = NULL;
3142		goto bad_swap;
3143	}
3144
3145	p->swap_file = swap_file;
3146	mapping = swap_file->f_mapping;
3147	inode = mapping->host;
3148
3149	/* If S_ISREG(inode->i_mode) will do inode_lock(inode); */
3150	error = claim_swapfile(p, inode);
3151	if (unlikely(error))
3152		goto bad_swap;
3153
3154	/*
3155	 * Read the swap header.
3156	 */
3157	if (!mapping->a_ops->readpage) {
3158		error = -EINVAL;
3159		goto bad_swap;
3160	}
3161	page = read_mapping_page(mapping, 0, swap_file);
3162	if (IS_ERR(page)) {
3163		error = PTR_ERR(page);
3164		goto bad_swap;
3165	}
3166	swap_header = kmap(page);
3167
3168	maxpages = read_swap_header(p, swap_header, inode);
3169	if (unlikely(!maxpages)) {
3170		error = -EINVAL;
3171		goto bad_swap;
3172	}
3173
3174	/* OK, set up the swap map and apply the bad block list */
3175	swap_map = vzalloc(maxpages);
3176	if (!swap_map) {
3177		error = -ENOMEM;
3178		goto bad_swap;
3179	}
3180
3181	if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
3182		p->flags |= SWP_STABLE_WRITES;
3183
3184	if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
3185		p->flags |= SWP_SYNCHRONOUS_IO;
3186
3187	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3188		int cpu;
3189		unsigned long ci, nr_cluster;
3190
3191		p->flags |= SWP_SOLIDSTATE;
3192		/*
3193		 * select a random position to start with to help wear leveling
3194		 * SSD
3195		 */
3196		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
3197		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3198
3199		cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
3200					GFP_KERNEL);
3201		if (!cluster_info) {
3202			error = -ENOMEM;
3203			goto bad_swap;
3204		}
3205
3206		for (ci = 0; ci < nr_cluster; ci++)
3207			spin_lock_init(&((cluster_info + ci)->lock));
3208
3209		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3210		if (!p->percpu_cluster) {
3211			error = -ENOMEM;
3212			goto bad_swap;
3213		}
3214		for_each_possible_cpu(cpu) {
3215			struct percpu_cluster *cluster;
3216			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3217			cluster_set_null(&cluster->index);
3218		}
3219	} else {
3220		atomic_inc(&nr_rotate_swap);
3221		inced_nr_rotate_swap = true;
3222	}
3223
3224	error = swap_cgroup_swapon(p->type, maxpages);
3225	if (error)
3226		goto bad_swap;
3227
3228	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3229		cluster_info, maxpages, &span);
3230	if (unlikely(nr_extents < 0)) {
3231		error = nr_extents;
3232		goto bad_swap;
3233	}
3234	/* frontswap enabled? set up bit-per-page map for frontswap */
3235	if (IS_ENABLED(CONFIG_FRONTSWAP))
3236		frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
3237					 GFP_KERNEL);
3238
3239	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3240		/*
3241		 * When discard is enabled for swap with no particular
3242		 * policy flagged, we set all swap discard flags here in
3243		 * order to sustain backward compatibility with older
3244		 * swapon(8) releases.
3245		 */
3246		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3247			     SWP_PAGE_DISCARD);
3248
3249		/*
3250		 * By flagging sys_swapon, a sysadmin can tell us to
3251		 * either do single-time area discards only, or to just
3252		 * perform discards for released swap page-clusters.
3253		 * Now it's time to adjust the p->flags accordingly.
3254		 */
3255		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3256			p->flags &= ~SWP_PAGE_DISCARD;
3257		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3258			p->flags &= ~SWP_AREA_DISCARD;
3259
3260		/* issue a swapon-time discard if it's still required */
3261		if (p->flags & SWP_AREA_DISCARD) {
3262			int err = discard_swap(p);
3263			if (unlikely(err))
3264				pr_err("swapon: discard_swap(%p): %d\n",
3265					p, err);
3266		}
3267	}
3268
3269	error = init_swap_address_space(p->type, maxpages);
3270	if (error)
3271		goto bad_swap;
3272
3273	mutex_lock(&swapon_mutex);
3274	prio = -1;
3275	if (swap_flags & SWAP_FLAG_PREFER)
3276		prio =
3277		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3278	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3279
3280	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3281		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3282		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3283		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3284		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3285		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3286		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3287		(frontswap_map) ? "FS" : "");
3288
3289	mutex_unlock(&swapon_mutex);
3290	atomic_inc(&proc_poll_event);
3291	wake_up_interruptible(&proc_poll_wait);
3292
3293	if (S_ISREG(inode->i_mode))
3294		inode->i_flags |= S_SWAPFILE;
3295	error = 0;
3296	goto out;
3297bad_swap:
3298	free_percpu(p->percpu_cluster);
3299	p->percpu_cluster = NULL;
3300	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3301		set_blocksize(p->bdev, p->old_block_size);
3302		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3303	}
3304	destroy_swap_extents(p);
3305	swap_cgroup_swapoff(p->type);
3306	spin_lock(&swap_lock);
3307	p->swap_file = NULL;
3308	p->flags = 0;
3309	spin_unlock(&swap_lock);
3310	vfree(swap_map);
3311	kvfree(cluster_info);
3312	kvfree(frontswap_map);
3313	if (inced_nr_rotate_swap)
3314		atomic_dec(&nr_rotate_swap);
3315	if (swap_file) {
3316		if (inode && S_ISREG(inode->i_mode)) {
3317			inode_unlock(inode);
3318			inode = NULL;
3319		}
3320		filp_close(swap_file, NULL);
3321	}
3322out:
3323	if (page && !IS_ERR(page)) {
3324		kunmap(page);
3325		put_page(page);
3326	}
3327	if (name)
3328		putname(name);
3329	if (inode && S_ISREG(inode->i_mode))
3330		inode_unlock(inode);
3331	if (!error)
3332		enable_swap_slots_cache();
3333	return error;
3334}
3335
3336void si_swapinfo(struct sysinfo *val)
3337{
3338	unsigned int type;
3339	unsigned long nr_to_be_unused = 0;
3340
3341	spin_lock(&swap_lock);
3342	for (type = 0; type < nr_swapfiles; type++) {
3343		struct swap_info_struct *si = swap_info[type];
3344
3345		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3346			nr_to_be_unused += si->inuse_pages;
3347	}
3348	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3349	val->totalswap = total_swap_pages + nr_to_be_unused;
3350	spin_unlock(&swap_lock);
3351}
3352
3353/*
3354 * Verify that a swap entry is valid and increment its swap map count.
3355 *
3356 * Returns 0 on success or an error code, as follows:
3357 * - success -> 0
3358 * - swp_entry is invalid -> EINVAL
3359 * - swp_entry is migration entry -> EINVAL
3360 * - swap-cache reference is requested but there is already one. -> EEXIST
3361 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3362 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3363 */
3364static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3365{
3366	struct swap_info_struct *p;
3367	struct swap_cluster_info *ci;
3368	unsigned long offset, type;
3369	unsigned char count;
3370	unsigned char has_cache;
3371	int err = -EINVAL;
3372
3373	if (non_swap_entry(entry))
3374		goto out;
3375
3376	type = swp_type(entry);
3377	if (type >= nr_swapfiles)
3378		goto bad_file;
3379	p = swap_info[type];
3380	offset = swp_offset(entry);
3381	if (unlikely(offset >= p->max))
3382		goto out;
3383
3384	ci = lock_cluster_or_swap_info(p, offset);
3385
3386	count = p->swap_map[offset];
3387
3388	/*
3389	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3390	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3391	 */
3392	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3393		err = -ENOENT;
3394		goto unlock_out;
3395	}
3396
3397	has_cache = count & SWAP_HAS_CACHE;
3398	count &= ~SWAP_HAS_CACHE;
3399	err = 0;
3400
3401	if (usage == SWAP_HAS_CACHE) {
3402
3403		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3404		if (!has_cache && count)
3405			has_cache = SWAP_HAS_CACHE;
3406		else if (has_cache)		/* someone else added cache */
3407			err = -EEXIST;
3408		else				/* no users remaining */
3409			err = -ENOENT;
3410
3411	} else if (count || has_cache) {
3412
3413		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3414			count += usage;
3415		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3416			err = -EINVAL;
3417		else if (swap_count_continued(p, offset, count))
3418			count = COUNT_CONTINUED;
3419		else
3420			err = -ENOMEM;
3421	} else
3422		err = -ENOENT;			/* unused swap entry */
3423
3424	p->swap_map[offset] = count | has_cache;
3425
3426unlock_out:
3427	unlock_cluster_or_swap_info(p, ci);
3428out:
3429	return err;
3430
3431bad_file:
3432	pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
3433	goto out;
3434}
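/*
 * Illustrative example (editor's sketch, assuming the usual <linux/swap.h>
 * encoding where SWAP_HAS_CACHE is a flag bit above the map count):
 *
 *	// swap_map[offset] == (3 | SWAP_HAS_CACHE): three pte references
 *	// plus the swap-cache reference.
 *	__swap_duplicate(entry, 1);		 // count becomes 4, returns 0
 *	__swap_duplicate(entry, SWAP_HAS_CACHE); // cache already set: -EEXIST
 *
 * On an entry with count 0 and no cache bit, both calls return -ENOENT.
 */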
3435
3436/*
3437 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3438 * (in which case its reference count is never incremented).
3439 */
3440void swap_shmem_alloc(swp_entry_t entry)
3441{
3442	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3443}
3444
3445/*
3446 * Increase reference count of swap entry by 1.
3447 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3448 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3449 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3450 * might occur if a page table entry has got corrupted.
3451 */
3452int swap_duplicate(swp_entry_t entry)
3453{
3454	int err = 0;
3455
3456	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3457		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3458	return err;
3459}
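/*
 * Illustrative caller pattern (editor's sketch; mm/memory.c's fork-time
 * pte copy is the real user of this fallback):
 *
 *	if (swap_duplicate(entry) < 0) {
 *		// GFP_ATOMIC continuation allocation failed: drop the page
 *		// table lock, retry with GFP_KERNEL, then redo the copy.
 *		ret = add_swap_count_continuation(entry, GFP_KERNEL);
 *	}
 */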
3460
3461/*
3462 * @entry: swap entry for which we allocate swap cache.
3463 *
3464 * Called when allocating swap cache for existing swap entry,
3465 * This can return error codes. Returns 0 on success.
3466 * -EEXIST means there is already a swap cache for this entry.
3467 * Note: return code is different from swap_duplicate().
3468 */
3469int swapcache_prepare(swp_entry_t entry)
3470{
3471	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3472}
3473
3474struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3475{
3476	return swap_info[swp_type(entry)];
3477}
3478
3479struct swap_info_struct *page_swap_info(struct page *page)
3480{
3481	swp_entry_t entry = { .val = page_private(page) };
3482	return swp_swap_info(entry);
3483}
3484
3485/*
3486 * out-of-line __page_file_ methods to avoid include hell.
3487 */
3488struct address_space *__page_file_mapping(struct page *page)
3489{
3490	return page_swap_info(page)->swap_file->f_mapping;
3491}
3492EXPORT_SYMBOL_GPL(__page_file_mapping);
3493
3494pgoff_t __page_file_index(struct page *page)
3495{
3496	swp_entry_t swap = { .val = page_private(page) };
3497	return swp_offset(swap);
3498}
3499EXPORT_SYMBOL_GPL(__page_file_index);
3500
3501/*
3502 * add_swap_count_continuation - called when a swap count is duplicated
3503 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3504 * page of the original vmalloc'ed swap_map, to hold the continuation count
3505 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3506 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3507 *
3508 * These continuation pages are seldom referenced: the common paths all work
3509 * on the original swap_map, only referring to a continuation page when the
3510 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3511 *
3512 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3513 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3514 * can be called after dropping locks.
3515 */
3516int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3517{
3518	struct swap_info_struct *si;
3519	struct swap_cluster_info *ci;
3520	struct page *head;
3521	struct page *page;
3522	struct page *list_page;
3523	pgoff_t offset;
3524	unsigned char count;
3525
3526	/*
3527	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3528	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3529	 */
3530	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3531
3532	si = swap_info_get(entry);
3533	if (!si) {
3534		/*
3535		 * An acceptable race has occurred since the failing
3536		 * __swap_duplicate(): the swap entry has been freed,
3537		 * perhaps even the whole swap_map cleared for swapoff.
3538		 */
3539		goto outer;
3540	}
3541
3542	offset = swp_offset(entry);
3543
3544	ci = lock_cluster(si, offset);
3545
3546	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
3547
3548	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3549		/*
3550		 * The higher the swap count, the more likely it is that tasks
3551		 * will race to add swap count continuation: we need to avoid
3552		 * over-provisioning.
3553		 */
3554		goto out;
3555	}
3556
3557	if (!page) {
3558		unlock_cluster(ci);
3559		spin_unlock(&si->lock);
3560		return -ENOMEM;
3561	}
3562
3563	/*
3564	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3565	 * no architecture is using highmem pages for kernel page tables: so it
3566	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3567	 */
3568	head = vmalloc_to_page(si->swap_map + offset);
3569	offset &= ~PAGE_MASK;
3570
3571	spin_lock(&si->cont_lock);
3572	/*
3573	 * Page allocation does not initialize the page's lru field,
3574	 * but it does always reset its private field.
3575	 */
3576	if (!page_private(head)) {
3577		BUG_ON(count & COUNT_CONTINUED);
3578		INIT_LIST_HEAD(&head->lru);
3579		set_page_private(head, SWP_CONTINUED);
3580		si->flags |= SWP_CONTINUED;
3581	}
3582
3583	list_for_each_entry(list_page, &head->lru, lru) {
3584		unsigned char *map;
3585
3586		/*
3587		 * If the previous map said no continuation, but we've found
3588		 * a continuation page, free our allocation and use this one.
3589		 */
3590		if (!(count & COUNT_CONTINUED))
3591			goto out_unlock_cont;
3592
3593		map = kmap_atomic(list_page) + offset;
3594		count = *map;
3595		kunmap_atomic(map);
3596
3597		/*
3598		 * If this continuation count now has some space in it,
3599		 * free our allocation and use this one.
3600		 */
3601		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3602			goto out_unlock_cont;
3603	}
3604
3605	list_add_tail(&page->lru, &head->lru);
3606	page = NULL;			/* now it's attached, don't free it */
3607out_unlock_cont:
3608	spin_unlock(&si->cont_lock);
3609out:
3610	unlock_cluster(ci);
3611	spin_unlock(&si->lock);
3612outer:
3613	if (page)
3614		__free_page(page);
3615	return 0;
3616}
3617
3618/*
3619 * swap_count_continued - when the original swap_map count is incremented
3620 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3621 * into, carry if so, or else fail until a new continuation page is allocated;
3622 * when the original swap_map count is decremented from 0 with continuation,
3623 * borrow from the continuation and report whether it still holds more.
3624 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3625 * lock.
3626 */
3627static bool swap_count_continued(struct swap_info_struct *si,
3628				 pgoff_t offset, unsigned char count)
3629{
3630	struct page *head;
3631	struct page *page;
3632	unsigned char *map;
3633	bool ret;
3634
3635	head = vmalloc_to_page(si->swap_map + offset);
3636	if (page_private(head) != SWP_CONTINUED) {
3637		BUG_ON(count & COUNT_CONTINUED);
3638		return false;		/* need to add count continuation */
3639	}
3640
3641	spin_lock(&si->cont_lock);
3642	offset &= ~PAGE_MASK;
3643	page = list_entry(head->lru.next, struct page, lru);
3644	map = kmap_atomic(page) + offset;
3645
3646	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
3647		goto init_map;		/* jump over SWAP_CONT_MAX checks */
3648
3649	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3650		/*
3651		 * Think of how you add 1 to 999
3652		 */
3653		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3654			kunmap_atomic(map);
3655			page = list_entry(page->lru.next, struct page, lru);
3656			BUG_ON(page == head);
3657			map = kmap_atomic(page) + offset;
3658		}
3659		if (*map == SWAP_CONT_MAX) {
3660			kunmap_atomic(map);
3661			page = list_entry(page->lru.next, struct page, lru);
3662			if (page == head) {
3663				ret = false;	/* add count continuation */
3664				goto out;
3665			}
3666			map = kmap_atomic(page) + offset;
3667init_map:		*map = 0;		/* we didn't zero the page */
3668		}
3669		*map += 1;
3670		kunmap_atomic(map);
3671		page = list_entry(page->lru.prev, struct page, lru);
3672		while (page != head) {
3673			map = kmap_atomic(page) + offset;
3674			*map = COUNT_CONTINUED;
3675			kunmap_atomic(map);
3676			page = list_entry(page->lru.prev, struct page, lru);
3677		}
3678		ret = true;			/* incremented */
3679
3680	} else {				/* decrementing */
3681		/*
3682		 * Think of how you subtract 1 from 1000
3683		 */
3684		BUG_ON(count != COUNT_CONTINUED);
3685		while (*map == COUNT_CONTINUED) {
3686			kunmap_atomic(map);
3687			page = list_entry(page->lru.next, struct page, lru);
3688			BUG_ON(page == head);
3689			map = kmap_atomic(page) + offset;
3690		}
3691		BUG_ON(*map == 0);
3692		*map -= 1;
3693		if (*map == 0)
3694			count = 0;
3695		kunmap_atomic(map);
3696		page = list_entry(page->lru.prev, struct page, lru);
3697		while (page != head) {
3698			map = kmap_atomic(page) + offset;
3699			*map = SWAP_CONT_MAX | count;
3700			count = COUNT_CONTINUED;
3701			kunmap_atomic(map);
3702			page = list_entry(page->lru.prev, struct page, lru);
3703		}
3704		ret = count == COUNT_CONTINUED;
3705	}
3706out:
3707	spin_unlock(&si->cont_lock);
3708	return ret;
3709}
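/*
 * Illustrative note (editor's sketch): the primary swap_map byte is the low
 * "digit" (saturating at SWAP_MAP_MAX) and each continuation page supplies
 * one higher digit per entry (saturating at SWAP_CONT_MAX), with
 * COUNT_CONTINUED marking "there is a higher digit".  Incrementing past a
 * run of full digits works like 999 + 1 = 1000: the full low digits are
 * reset (keeping COUNT_CONTINUED) and the first digit with room is bumped;
 * decrementing through a run of empty digits borrows the other way round,
 * like 1000 - 1 = 999.
 */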
3710
3711/*
3712 * free_swap_count_continuations - swapoff free all the continuation pages
3713 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3714 */
3715static void free_swap_count_continuations(struct swap_info_struct *si)
3716{
3717	pgoff_t offset;
3718
3719	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3720		struct page *head;
3721		head = vmalloc_to_page(si->swap_map + offset);
3722		if (page_private(head)) {
3723			struct page *page, *next;
3724
3725			list_for_each_entry_safe(page, next, &head->lru, lru) {
3726				list_del(&page->lru);
3727				__free_page(page);
3728			}
3729		}
3730	}
3731}
3732
3733static int __init swapfile_init(void)
3734{
3735	int nid;
3736
3737	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3738					 GFP_KERNEL);
3739	if (!swap_avail_heads) {
3740		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3741		return -ENOMEM;
3742	}
3743
3744	for_each_node(nid)
3745		plist_head_init(&swap_avail_heads[nid]);
3746
3747	return 0;
3748}
3749subsys_initcall(swapfile_init);
v4.6
   1/*
   2 *  linux/mm/swapfile.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *  Swap reorganised 29.12.95, Stephen Tweedie
   6 */
   7
   8#include <linux/mm.h>
 
 
   9#include <linux/hugetlb.h>
  10#include <linux/mman.h>
  11#include <linux/slab.h>
  12#include <linux/kernel_stat.h>
  13#include <linux/swap.h>
  14#include <linux/vmalloc.h>
  15#include <linux/pagemap.h>
  16#include <linux/namei.h>
  17#include <linux/shmem_fs.h>
  18#include <linux/blkdev.h>
  19#include <linux/random.h>
  20#include <linux/writeback.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/init.h>
  24#include <linux/ksm.h>
  25#include <linux/rmap.h>
  26#include <linux/security.h>
  27#include <linux/backing-dev.h>
  28#include <linux/mutex.h>
  29#include <linux/capability.h>
  30#include <linux/syscalls.h>
  31#include <linux/memcontrol.h>
  32#include <linux/poll.h>
  33#include <linux/oom.h>
  34#include <linux/frontswap.h>
  35#include <linux/swapfile.h>
  36#include <linux/export.h>
 
 
  37
  38#include <asm/pgtable.h>
  39#include <asm/tlbflush.h>
  40#include <linux/swapops.h>
  41#include <linux/swap_cgroup.h>
  42
  43static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  44				 unsigned char);
  45static void free_swap_count_continuations(struct swap_info_struct *);
  46static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  47
  48DEFINE_SPINLOCK(swap_lock);
  49static unsigned int nr_swapfiles;
  50atomic_long_t nr_swap_pages;
  51/*
  52 * Some modules use swappable objects and may try to swap them out under
  53 * memory pressure (via the shrinker). Before doing so, they may wish to
  54 * check to see if any swap space is available.
  55 */
  56EXPORT_SYMBOL_GPL(nr_swap_pages);
  57/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  58long total_swap_pages;
  59static int least_priority;
  60
  61static const char Bad_file[] = "Bad swap file entry ";
  62static const char Unused_file[] = "Unused swap file entry ";
  63static const char Bad_offset[] = "Bad swap offset entry ";
  64static const char Unused_offset[] = "Unused swap offset entry ";
  65
  66/*
  67 * all active swap_info_structs
  68 * protected with swap_lock, and ordered by priority.
  69 */
  70PLIST_HEAD(swap_active_head);
  71
  72/*
  73 * all available (active, not full) swap_info_structs
  74 * protected with swap_avail_lock, ordered by priority.
  75 * This is used by get_swap_page() instead of swap_active_head
  76 * because swap_active_head includes all swap_info_structs,
  77 * but get_swap_page() doesn't need to look at full ones.
  78 * This uses its own lock instead of swap_lock because when a
  79 * swap_info_struct changes between not-full/full, it needs to
  80 * add/remove itself to/from this list, but the swap_info_struct->lock
  81 * is held and the locking order requires swap_lock to be taken
  82 * before any swap_info_struct->lock.
  83 */
  84static PLIST_HEAD(swap_avail_head);
  85static DEFINE_SPINLOCK(swap_avail_lock);
  86
  87struct swap_info_struct *swap_info[MAX_SWAPFILES];
  88
  89static DEFINE_MUTEX(swapon_mutex);
  90
  91static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  92/* Activity counter to indicate that a swapon or swapoff has occurred */
  93static atomic_t proc_poll_event = ATOMIC_INIT(0);
  94
  95static inline unsigned char swap_count(unsigned char ent)
  96{
  97	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
  98}
  99
 100/* returns 1 if swap entry is freed */
 101static int
 102__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
 103{
 104	swp_entry_t entry = swp_entry(si->type, offset);
 105	struct page *page;
 106	int ret = 0;
 107
 108	page = find_get_page(swap_address_space(entry), entry.val);
 109	if (!page)
 110		return 0;
 111	/*
 112	 * This function is called from scan_swap_map() and it's called
 113	 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
 114	 * We have to use trylock for avoiding deadlock. This is a special
 115	 * case and you should use try_to_free_swap() with explicit lock_page()
 116	 * in usual operations.
 117	 */
 118	if (trylock_page(page)) {
 119		ret = try_to_free_swap(page);
 120		unlock_page(page);
 121	}
 122	put_page(page);
 123	return ret;
 124}
 125
 126/*
 127 * swapon tells the device that all the old swap contents can be discarded,
 128 * to allow the swap device to optimize its wear-levelling.
 129 */
 130static int discard_swap(struct swap_info_struct *si)
 131{
 132	struct swap_extent *se;
 133	sector_t start_block;
 134	sector_t nr_blocks;
 135	int err = 0;
 136
 137	/* Do not discard the swap header page! */
 138	se = &si->first_swap_extent;
 139	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 140	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
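	/*
	 * The shift by (PAGE_SHIFT - 9) converts page numbers into the
	 * 512-byte sector units that blkdev_issue_discard() expects.
	 */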
 141	if (nr_blocks) {
 142		err = blkdev_issue_discard(si->bdev, start_block,
 143				nr_blocks, GFP_KERNEL, 0);
 144		if (err)
 145			return err;
 146		cond_resched();
 147	}
 148
 149	list_for_each_entry(se, &si->first_swap_extent.list, list) {
 150		start_block = se->start_block << (PAGE_SHIFT - 9);
 151		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 152
 153		err = blkdev_issue_discard(si->bdev, start_block,
 154				nr_blocks, GFP_KERNEL, 0);
 155		if (err)
 156			break;
 157
 158		cond_resched();
 159	}
 160	return err;		/* That will often be -EOPNOTSUPP */
 161}
 162
 163/*
 164 * swap allocation tells the device that a cluster of swap can now be discarded,
 165 * to allow the swap device to optimize its wear-levelling.
 166 */
 167static void discard_swap_cluster(struct swap_info_struct *si,
 168				 pgoff_t start_page, pgoff_t nr_pages)
 169{
 170	struct swap_extent *se = si->curr_swap_extent;
 171	int found_extent = 0;
 172
 173	while (nr_pages) {
 174		if (se->start_page <= start_page &&
 175		    start_page < se->start_page + se->nr_pages) {
 176			pgoff_t offset = start_page - se->start_page;
 177			sector_t start_block = se->start_block + offset;
 178			sector_t nr_blocks = se->nr_pages - offset;
 179
 180			if (nr_blocks > nr_pages)
 181				nr_blocks = nr_pages;
 182			start_page += nr_blocks;
 183			nr_pages -= nr_blocks;
 184
 185			if (!found_extent++)
 186				si->curr_swap_extent = se;
 187
 188			start_block <<= PAGE_SHIFT - 9;
 189			nr_blocks <<= PAGE_SHIFT - 9;
 190			if (blkdev_issue_discard(si->bdev, start_block,
 191				    nr_blocks, GFP_NOIO, 0))
 192				break;
 193		}
 194
 195		se = list_next_entry(se, list);
 196	}
 197}
 198
 199#define SWAPFILE_CLUSTER	256
 200#define LATENCY_LIMIT		256
 201
 202static inline void cluster_set_flag(struct swap_cluster_info *info,
 203	unsigned int flag)
 204{
 205	info->flags = flag;
 206}
 207
 208static inline unsigned int cluster_count(struct swap_cluster_info *info)
 209{
 210	return info->data;
 211}
 212
 213static inline void cluster_set_count(struct swap_cluster_info *info,
 214				     unsigned int c)
 215{
 216	info->data = c;
 217}
 218
 219static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 220					 unsigned int c, unsigned int f)
 221{
 222	info->flags = f;
 223	info->data = c;
 224}
 225
 226static inline unsigned int cluster_next(struct swap_cluster_info *info)
 227{
 228	return info->data;
 229}
 230
 231static inline void cluster_set_next(struct swap_cluster_info *info,
 232				    unsigned int n)
 233{
 234	info->data = n;
 235}
 236
 237static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 238					 unsigned int n, unsigned int f)
 239{
 240	info->flags = f;
 241	info->data = n;
 242}
 243
 244static inline bool cluster_is_free(struct swap_cluster_info *info)
 245{
 246	return info->flags & CLUSTER_FLAG_FREE;
 247}
 248
 249static inline bool cluster_is_null(struct swap_cluster_info *info)
 250{
 251	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 252}
 253
 254static inline void cluster_set_null(struct swap_cluster_info *info)
 255{
 256	info->flags = CLUSTER_FLAG_NEXT_NULL;
 257	info->data = 0;
 258}
 259
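/*
 * Note on the accessors above: swap_cluster_info packs two fields.  For a
 * cluster that is in use, ->data holds the count of allocated entries
 * (cluster_count()/cluster_set_count()); for a cluster on the free or discard
 * list, ->data instead holds the index of the next cluster on that list
 * (cluster_next()/cluster_set_next()), with ->flags (CLUSTER_FLAG_FREE,
 * CLUSTER_FLAG_NEXT_NULL) telling the two uses apart.
 */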
 260/* Add a cluster to discard list and schedule it to do discard */
 261static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 262		unsigned int idx)
 263{
 264	/*
 265	 * If scan_swap_map() can't find a free cluster, it will check
 266	 * si->swap_map directly. To make sure the cluster being discarded isn't
 267	 * taken by scan_swap_map(), mark its swap entries bad (occupied). They
 268	 * will be cleared after the discard.
 269	 */
 270	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 271			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 272
 273	if (cluster_is_null(&si->discard_cluster_head)) {
 274		cluster_set_next_flag(&si->discard_cluster_head,
 275						idx, 0);
 276		cluster_set_next_flag(&si->discard_cluster_tail,
 277						idx, 0);
 278	} else {
 279		unsigned int tail = cluster_next(&si->discard_cluster_tail);
 280		cluster_set_next(&si->cluster_info[tail], idx);
 281		cluster_set_next_flag(&si->discard_cluster_tail,
 282						idx, 0);
 283	}
 284
 285	schedule_work(&si->discard_work);
 286}
 287
 288/*
 289 * Actually do the discards. After a cluster discard is finished, the cluster
 290 * will be added to the free cluster list. The caller should hold si->lock.
 291 */
 292static void swap_do_scheduled_discard(struct swap_info_struct *si)
 293{
 294	struct swap_cluster_info *info;
 295	unsigned int idx;
 296
 297	info = si->cluster_info;
 298
 299	while (!cluster_is_null(&si->discard_cluster_head)) {
 300		idx = cluster_next(&si->discard_cluster_head);
 301
 302		cluster_set_next_flag(&si->discard_cluster_head,
 303						cluster_next(&info[idx]), 0);
 304		if (cluster_next(&si->discard_cluster_tail) == idx) {
 305			cluster_set_null(&si->discard_cluster_head);
 306			cluster_set_null(&si->discard_cluster_tail);
 307		}
 308		spin_unlock(&si->lock);
 309
 310		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 311				SWAPFILE_CLUSTER);
 312
 313		spin_lock(&si->lock);
 314		cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
 315		if (cluster_is_null(&si->free_cluster_head)) {
 316			cluster_set_next_flag(&si->free_cluster_head,
 317						idx, 0);
 318			cluster_set_next_flag(&si->free_cluster_tail,
 319						idx, 0);
 320		} else {
 321			unsigned int tail;
 322
 323			tail = cluster_next(&si->free_cluster_tail);
 324			cluster_set_next(&info[tail], idx);
 325			cluster_set_next_flag(&si->free_cluster_tail,
 326						idx, 0);
 327		}
 328		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 329				0, SWAPFILE_CLUSTER);
 330	}
 331}
 332
 333static void swap_discard_work(struct work_struct *work)
 334{
 335	struct swap_info_struct *si;
 336
 337	si = container_of(work, struct swap_info_struct, discard_work);
 338
 339	spin_lock(&si->lock);
 340	swap_do_scheduled_discard(si);
 341	spin_unlock(&si->lock);
 342}
 343
 344/*
 345 * The cluster corresponding to page_nr will be used. The cluster will be
 346 * removed from free cluster list and its usage counter will be increased.
 347 */
 348static void inc_cluster_info_page(struct swap_info_struct *p,
 349	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 350{
 351	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 352
 353	if (!cluster_info)
 354		return;
 355	if (cluster_is_free(&cluster_info[idx])) {
 356		VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);
 357		cluster_set_next_flag(&p->free_cluster_head,
 358			cluster_next(&cluster_info[idx]), 0);
 359		if (cluster_next(&p->free_cluster_tail) == idx) {
 360			cluster_set_null(&p->free_cluster_tail);
 361			cluster_set_null(&p->free_cluster_head);
 362		}
 363		cluster_set_count_flag(&cluster_info[idx], 0, 0);
 364	}
 365
 366	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 367	cluster_set_count(&cluster_info[idx],
 368		cluster_count(&cluster_info[idx]) + 1);
 369}
 370
 371/*
 372 * Decrement the usage count of the cluster corresponding to page_nr. If the
 373 * counter reaches 0, meaning no page in the cluster is in use, we can
 374 * optionally discard the cluster and add it to the free cluster list.
 375 */
 376static void dec_cluster_info_page(struct swap_info_struct *p,
 377	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 378{
 379	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 380
 381	if (!cluster_info)
 382		return;
 383
 384	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 385	cluster_set_count(&cluster_info[idx],
 386		cluster_count(&cluster_info[idx]) - 1);
 387
 388	if (cluster_count(&cluster_info[idx]) == 0) {
 389		/*
 390		 * If the swap is discardable, schedule a discard of the cluster
 391		 * instead of freeing it immediately. The cluster will be freed
 392		 * after the discard.
 393		 */
 394		if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 395				 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 396			swap_cluster_schedule_discard(p, idx);
 397			return;
 398		}
 399
 400		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
 401		if (cluster_is_null(&p->free_cluster_head)) {
 402			cluster_set_next_flag(&p->free_cluster_head, idx, 0);
 403			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
 404		} else {
 405			unsigned int tail = cluster_next(&p->free_cluster_tail);
 406			cluster_set_next(&cluster_info[tail], idx);
 407			cluster_set_next_flag(&p->free_cluster_tail, idx, 0);
 408		}
 409	}
 410}
 411
 412/*
 413 * It's possible for scan_swap_map() to use a free cluster from the middle of
 414 * the free cluster list. Avoid such abuse to prevent list corruption.
 415 */
 416static bool
 417scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 418	unsigned long offset)
 419{
 420	struct percpu_cluster *percpu_cluster;
 421	bool conflict;
 422
 423	offset /= SWAPFILE_CLUSTER;
 424	conflict = !cluster_is_null(&si->free_cluster_head) &&
 425		offset != cluster_next(&si->free_cluster_head) &&
 426		cluster_is_free(&si->cluster_info[offset]);
 427
 428	if (!conflict)
 429		return false;
 430
 431	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 432	cluster_set_null(&percpu_cluster->index);
 433	return true;
 434}
 435
 436/*
 437 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 438 * might involve allocating a new cluster for current CPU too.
 439 */
 440static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 441	unsigned long *offset, unsigned long *scan_base)
 442{
 443	struct percpu_cluster *cluster;
 444	bool found_free;
 445	unsigned long tmp;
 446
 447new_cluster:
 448	cluster = this_cpu_ptr(si->percpu_cluster);
 449	if (cluster_is_null(&cluster->index)) {
 450		if (!cluster_is_null(&si->free_cluster_head)) {
 451			cluster->index = si->free_cluster_head;
 452			cluster->next = cluster_next(&cluster->index) *
 453					SWAPFILE_CLUSTER;
 454		} else if (!cluster_is_null(&si->discard_cluster_head)) {
 455			/*
 456			 * we don't have a free cluster but have some clusters
 457			 * being discarded; do the discard now and reclaim them
 458			 */
 459			swap_do_scheduled_discard(si);
 460			*scan_base = *offset = si->cluster_next;
 461			goto new_cluster;
 462		} else
 463			return;
 464	}
 465
 466	found_free = false;
 467
 468	/*
 469	 * Other CPUs can use our cluster if they can't find a free cluster,
 470	 * so check whether there is still a free entry in the cluster
 471	 */
 472	tmp = cluster->next;
 473	while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
 474	       SWAPFILE_CLUSTER) {
 475		if (!si->swap_map[tmp]) {
 476			found_free = true;
 477			break;
 478		}
 479		tmp++;
 480	}
 481	if (!found_free) {
 482		cluster_set_null(&cluster->index);
 483		goto new_cluster;
 484	}
 485	cluster->next = tmp + 1;
 486	*offset = tmp;
 487	*scan_base = tmp;
 488}
 489
 490static unsigned long scan_swap_map(struct swap_info_struct *si,
 491				   unsigned char usage)
 492{
 493	unsigned long offset;
 494	unsigned long scan_base;
 495	unsigned long last_in_cluster = 0;
 496	int latency_ration = LATENCY_LIMIT;
 497
 498	/*
 499	 * We try to cluster swap pages by allocating them sequentially
 500	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 501	 * way, however, we resort to first-free allocation, starting
 502	 * a new cluster.  This prevents us from scattering swap pages
 503	 * all over the entire swap partition, so that we reduce
 504	 * overall disk seek times between swap pages.  -- sct
 505	 * But we do now try to find an empty cluster.  -Andrea
 506	 * And we let swap pages go all over an SSD partition.  Hugh
 507	 */
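	/*
	 * Concretely: on rotational devices the code below looks for a run of
	 * SWAPFILE_CLUSTER (256) consecutive free slots before starting a new
	 * cluster, whereas on SSDs (si->cluster_info != NULL) allocation goes
	 * through the per-cpu cluster via scan_swap_map_try_ssd_cluster().
	 */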
 508
 509	si->flags += SWP_SCANNING;
 510	scan_base = offset = si->cluster_next;
 511
 512	/* SSD algorithm */
 513	if (si->cluster_info) {
 514		scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
 515		goto checks;
 516	}
 517
 518	if (unlikely(!si->cluster_nr--)) {
 519		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 520			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 521			goto checks;
 522		}
 523
 524		spin_unlock(&si->lock);
 525
 526		/*
 527		 * If seek is expensive, start searching for new cluster from
 528		 * start of partition, to minimize the span of allocated swap.
 529		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 530		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 531		 */
 532		scan_base = offset = si->lowest_bit;
 533		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 534
 535		/* Locate the first empty (unaligned) cluster */
 536		for (; last_in_cluster <= si->highest_bit; offset++) {
 537			if (si->swap_map[offset])
 538				last_in_cluster = offset + SWAPFILE_CLUSTER;
 539			else if (offset == last_in_cluster) {
 540				spin_lock(&si->lock);
 541				offset -= SWAPFILE_CLUSTER - 1;
 542				si->cluster_next = offset;
 543				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 544				goto checks;
 545			}
 546			if (unlikely(--latency_ration < 0)) {
 547				cond_resched();
 548				latency_ration = LATENCY_LIMIT;
 549			}
 550		}
 551
 552		offset = scan_base;
 553		spin_lock(&si->lock);
 554		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 555	}
 556
 557checks:
 558	if (si->cluster_info) {
 559		while (scan_swap_map_ssd_cluster_conflict(si, offset))
 560			scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
 561	}
 562	if (!(si->flags & SWP_WRITEOK))
 563		goto no_page;
 564	if (!si->highest_bit)
 565		goto no_page;
 566	if (offset > si->highest_bit)
 567		scan_base = offset = si->lowest_bit;
 568
 569	/* reuse swap entry of cache-only swap if not busy. */
 570	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 571		int swap_was_freed;
 572		spin_unlock(&si->lock);
 573		swap_was_freed = __try_to_reclaim_swap(si, offset);
 574		spin_lock(&si->lock);
 575		/* entry was freed successfully, try to use this again */
 576		if (swap_was_freed)
 577			goto checks;
 578		goto scan; /* check next one */
 579	}
 580
 581	if (si->swap_map[offset])
 582		goto scan;
 583
 584	if (offset == si->lowest_bit)
 585		si->lowest_bit++;
 586	if (offset == si->highest_bit)
 587		si->highest_bit--;
 588	si->inuse_pages++;
 589	if (si->inuse_pages == si->pages) {
 590		si->lowest_bit = si->max;
 591		si->highest_bit = 0;
 592		spin_lock(&swap_avail_lock);
 593		plist_del(&si->avail_list, &swap_avail_head);
 594		spin_unlock(&swap_avail_lock);
 595	}
 596	si->swap_map[offset] = usage;
 597	inc_cluster_info_page(si, si->cluster_info, offset);
 598	si->cluster_next = offset + 1;
 599	si->flags -= SWP_SCANNING;
 600
 601	return offset;
 602
 603scan:
 604	spin_unlock(&si->lock);
 605	while (++offset <= si->highest_bit) {
 606		if (!si->swap_map[offset]) {
 607			spin_lock(&si->lock);
 608			goto checks;
 609		}
 610		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 611			spin_lock(&si->lock);
 612			goto checks;
 613		}
 614		if (unlikely(--latency_ration < 0)) {
 615			cond_resched();
 616			latency_ration = LATENCY_LIMIT;
 617		}
 618	}
 619	offset = si->lowest_bit;
 620	while (offset < scan_base) {
 621		if (!si->swap_map[offset]) {
 622			spin_lock(&si->lock);
 623			goto checks;
 624		}
 625		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 626			spin_lock(&si->lock);
 627			goto checks;
 628		}
 629		if (unlikely(--latency_ration < 0)) {
 630			cond_resched();
 631			latency_ration = LATENCY_LIMIT;
 632		}
 633		offset++;
 634	}
 635	spin_lock(&si->lock);
 636
 637no_page:
 638	si->flags -= SWP_SCANNING;
 639	return 0;
 640}
 641
 642swp_entry_t get_swap_page(void)
 643{
 644	struct swap_info_struct *si, *next;
 645	pgoff_t offset;
 646
 647	if (atomic_long_read(&nr_swap_pages) <= 0)
 648		goto noswap;
 649	atomic_long_dec(&nr_swap_pages);
 650
 651	spin_lock(&swap_avail_lock);
 652
 653start_over:
 654	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
 655		/* requeue si to after same-priority siblings */
 656		plist_requeue(&si->avail_list, &swap_avail_head);
 657		spin_unlock(&swap_avail_lock);
 658		spin_lock(&si->lock);
 659		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
 660			spin_lock(&swap_avail_lock);
 661			if (plist_node_empty(&si->avail_list)) {
 662				spin_unlock(&si->lock);
 663				goto nextsi;
 664			}
 665			WARN(!si->highest_bit,
 666			     "swap_info %d in list but !highest_bit\n",
 667			     si->type);
 668			WARN(!(si->flags & SWP_WRITEOK),
 669			     "swap_info %d in list but !SWP_WRITEOK\n",
 670			     si->type);
 671			plist_del(&si->avail_list, &swap_avail_head);
 672			spin_unlock(&si->lock);
 673			goto nextsi;
 674		}
 675
 676		/* This is called for allocating swap entry for cache */
 677		offset = scan_swap_map(si, SWAP_HAS_CACHE);
 678		spin_unlock(&si->lock);
 679		if (offset)
 680			return swp_entry(si->type, offset);
 681		pr_debug("scan_swap_map of si %d failed to find offset\n",
 682		       si->type);
 683		spin_lock(&swap_avail_lock);
 684nextsi:
 685		/*
 686		 * if we got here, it's likely that si was almost full before,
 687		 * and since scan_swap_map() can drop the si->lock, multiple
 688		 * callers probably all tried to get a page from the same si
 689		 * and it filled up before we could get one; or, the si filled
 690		 * up between us dropping swap_avail_lock and taking si->lock.
 691		 * Since we dropped the swap_avail_lock, the swap_avail_head
 692		 * list may have been modified; so if next is still in the
 693		 * swap_avail_head list then try it, otherwise start over.
 694		 */
 695		if (plist_node_empty(&next->avail_list))
 696			goto start_over;
 697	}
 698
 699	spin_unlock(&swap_avail_lock);
 700
 701	atomic_long_inc(&nr_swap_pages);
 702noswap:
 703	return (swp_entry_t) {0};
 704}
 705
 706/* The only caller of this function is now suspend routine */
 707swp_entry_t get_swap_page_of_type(int type)
 708{
 709	struct swap_info_struct *si;
 710	pgoff_t offset;
 711
 712	si = swap_info[type];
 713	spin_lock(&si->lock);
 714	if (si && (si->flags & SWP_WRITEOK)) {
 715		atomic_long_dec(&nr_swap_pages);
 716		/* This is called for allocating swap entry, not cache */
 717		offset = scan_swap_map(si, 1);
 718		if (offset) {
 719			spin_unlock(&si->lock);
 720			return swp_entry(type, offset);
 721		}
 722		atomic_long_inc(&nr_swap_pages);
 723	}
 724	spin_unlock(&si->lock);
 725	return (swp_entry_t) {0};
 726}
 727
 728static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 729{
 730	struct swap_info_struct *p;
 731	unsigned long offset, type;
 732
 733	if (!entry.val)
 734		goto out;
 735	type = swp_type(entry);
 736	if (type >= nr_swapfiles)
 737		goto bad_nofile;
 738	p = swap_info[type];
 739	if (!(p->flags & SWP_USED))
 740		goto bad_device;
 741	offset = swp_offset(entry);
 742	if (offset >= p->max)
 743		goto bad_offset;
 744	if (!p->swap_map[offset])
 745		goto bad_free;
 746	spin_lock(&p->lock);
 747	return p;
 748
 749bad_free:
 750	pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
 751	goto out;
 752bad_offset:
 753	pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
 754	goto out;
 755bad_device:
 756	pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
 757	goto out;
 758bad_nofile:
 759	pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
 760out:
 761	return NULL;
 762}
 763
 764static unsigned char swap_entry_free(struct swap_info_struct *p,
 765				     swp_entry_t entry, unsigned char usage)
 766{
 767	unsigned long offset = swp_offset(entry);
 768	unsigned char count;
 769	unsigned char has_cache;
 770
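	/*
	 * Each swap_map byte packs the map count in its low bits, with
	 * SWAP_HAS_CACHE marking a swap cache reference and COUNT_CONTINUED
	 * marking a count that has overflowed into the continuation pages
	 * managed by swap_count_continued().
	 */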
 771	count = p->swap_map[offset];
 772	has_cache = count & SWAP_HAS_CACHE;
 773	count &= ~SWAP_HAS_CACHE;
 774
 775	if (usage == SWAP_HAS_CACHE) {
 776		VM_BUG_ON(!has_cache);
 777		has_cache = 0;
 778	} else if (count == SWAP_MAP_SHMEM) {
 779		/*
 780		 * Or we could insist on shmem.c using a special
 781		 * swap_shmem_free() and free_shmem_swap_and_cache()...
 782		 */
 783		count = 0;
 784	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 785		if (count == COUNT_CONTINUED) {
 786			if (swap_count_continued(p, offset, count))
 787				count = SWAP_MAP_MAX | COUNT_CONTINUED;
 788			else
 789				count = SWAP_MAP_MAX;
 790		} else
 791			count--;
 792	}
 793
 794	usage = count | has_cache;
 795	p->swap_map[offset] = usage;
 796
 797	/* free if no reference */
 798	if (!usage) {
 799		mem_cgroup_uncharge_swap(entry);
 800		dec_cluster_info_page(p, p->cluster_info, offset);
 801		if (offset < p->lowest_bit)
 802			p->lowest_bit = offset;
 803		if (offset > p->highest_bit) {
 804			bool was_full = !p->highest_bit;
 805			p->highest_bit = offset;
 806			if (was_full && (p->flags & SWP_WRITEOK)) {
 807				spin_lock(&swap_avail_lock);
 808				WARN_ON(!plist_node_empty(&p->avail_list));
 809				if (plist_node_empty(&p->avail_list))
 810					plist_add(&p->avail_list,
 811						  &swap_avail_head);
 812				spin_unlock(&swap_avail_lock);
 813			}
 814		}
 815		atomic_long_inc(&nr_swap_pages);
 816		p->inuse_pages--;
 817		frontswap_invalidate_page(p->type, offset);
 818		if (p->flags & SWP_BLKDEV) {
 819			struct gendisk *disk = p->bdev->bd_disk;
 820			if (disk->fops->swap_slot_free_notify)
 821				disk->fops->swap_slot_free_notify(p->bdev,
 822								  offset);
 823		}
 824	}
 825
 826	return usage;
 827}
 828
 829/*
 830 * Caller has made sure that the swap device corresponding to entry
 831 * is still around or has not been recycled.
 832 */
 833void swap_free(swp_entry_t entry)
 834{
 835	struct swap_info_struct *p;
 836
 837	p = swap_info_get(entry);
 838	if (p) {
 839		swap_entry_free(p, entry, 1);
 840		spin_unlock(&p->lock);
 841	}
 842}
 843
 844/*
 845 * Called after dropping swapcache to decrease refcnt to swap entries.
 846 */
 847void swapcache_free(swp_entry_t entry)
 848{
 849	struct swap_info_struct *p;
 850
 851	p = swap_info_get(entry);
 852	if (p) {
 853		swap_entry_free(p, entry, SWAP_HAS_CACHE);
 854		spin_unlock(&p->lock);
 855	}
 856}
 857
 858/*
 859 * How many references to page are currently swapped out?
 860 * This does not give an exact answer when swap count is continued,
 861 * but does include the high COUNT_CONTINUED flag to allow for that.
 862 */
 863int page_swapcount(struct page *page)
 864{
 865	int count = 0;
 866	struct swap_info_struct *p;
 867	swp_entry_t entry;
 868
 869	entry.val = page_private(page);
 870	p = swap_info_get(entry);
 871	if (p) {
 872		count = swap_count(p->swap_map[swp_offset(entry)]);
 873		spin_unlock(&p->lock);
 874	}
 875	return count;
 876}
 877
 878/*
 879 * How many references to @entry are currently swapped out?
 880 * This considers COUNT_CONTINUED so it returns exact answer.
 881 */
 882int swp_swapcount(swp_entry_t entry)
 883{
 884	int count, tmp_count, n;
 885	struct swap_info_struct *p;
 886	struct page *page;
 887	pgoff_t offset;
 888	unsigned char *map;
 889
 890	p = swap_info_get(entry);
 891	if (!p)
 892		return 0;
 893
 894	count = swap_count(p->swap_map[swp_offset(entry)]);
 895	if (!(count & COUNT_CONTINUED))
 896		goto out;
 897
 898	count &= ~COUNT_CONTINUED;
 899	n = SWAP_MAP_MAX + 1;
 900
 901	offset = swp_offset(entry);
 902	page = vmalloc_to_page(p->swap_map + offset);
 903	offset &= ~PAGE_MASK;
 904	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
 905
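	/*
	 * Reconstruct the exact count positionally: each continuation page
	 * contributes its stored "digit" times n, where n starts at
	 * SWAP_MAP_MAX + 1 and is multiplied by SWAP_CONT_MAX + 1 for every
	 * further continuation page.
	 */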
 906	do {
 907		page = list_next_entry(page, lru);
 908		map = kmap_atomic(page);
 909		tmp_count = map[offset];
 910		kunmap_atomic(map);
 911
 912		count += (tmp_count & ~COUNT_CONTINUED) * n;
 913		n *= (SWAP_CONT_MAX + 1);
 914	} while (tmp_count & COUNT_CONTINUED);
 915out:
 916	spin_unlock(&p->lock);
 917	return count;
 918}
 919
 920/*
 921 * We can write to an anon page without COW if there are no other references
 922 * to it.  And as a side-effect, free up its swap: because the old content
 923 * on disk will never be read, and seeking back there to write new content
 924 * later would only waste time away from clustering.
 925 *
 926 * NOTE: total_mapcount should not be relied upon by the caller if
 927 * reuse_swap_page() returns false, but it may be always overwritten
 928 * (see the other implementation for CONFIG_SWAP=n).
 929 */
 930bool reuse_swap_page(struct page *page, int *total_mapcount)
 931{
 932	int count;
 933
 934	VM_BUG_ON_PAGE(!PageLocked(page), page);
 935	if (unlikely(PageKsm(page)))
 936		return false;
 937	count = page_trans_huge_mapcount(page, total_mapcount);
 938	if (count <= 1 && PageSwapCache(page)) {
 939		count += page_swapcount(page);
 940		if (count == 1 && !PageWriteback(page)) {
 941			delete_from_swap_cache(page);
 942			SetPageDirty(page);
 943		}
 944	}
 945	return count <= 1;
 946}
 947
 948/*
 949 * If swap is getting full, or if there are no more mappings of this page,
 950 * then try_to_free_swap is called to free its swap space.
 951 */
 952int try_to_free_swap(struct page *page)
 953{
 954	VM_BUG_ON_PAGE(!PageLocked(page), page);
 955
 956	if (!PageSwapCache(page))
 957		return 0;
 958	if (PageWriteback(page))
 959		return 0;
 960	if (page_swapcount(page))
 961		return 0;
 962
 963	/*
 964	 * Once hibernation has begun to create its image of memory,
 965	 * there's a danger that one of the calls to try_to_free_swap()
 966	 * - most probably a call from __try_to_reclaim_swap() while
 967	 * hibernation is allocating its own swap pages for the image,
 968	 * but conceivably even a call from memory reclaim - will free
 969	 * the swap from a page which has already been recorded in the
 970	 * image as a clean swapcache page, and then reuse its swap for
 971	 * another page of the image.  On waking from hibernation, the
 972	 * original page might be freed under memory pressure, then
 973	 * later read back in from swap, now with the wrong data.
 974	 *
 975	 * Hibernation suspends storage while it is writing the image
 976	 * to disk so check that here.
 977	 */
 978	if (pm_suspended_storage())
 979		return 0;
 980
 981	delete_from_swap_cache(page);
 982	SetPageDirty(page);
 983	return 1;
 984}
 985
 986/*
 987 * Free the swap entry like above, but also try to
 988 * free the page cache entry if it is the last user.
 989 */
 990int free_swap_and_cache(swp_entry_t entry)
 991{
 992	struct swap_info_struct *p;
 993	struct page *page = NULL;
 994
 995	if (non_swap_entry(entry))
 996		return 1;
 997
 998	p = swap_info_get(entry);
 999	if (p) {
1000		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
1001			page = find_get_page(swap_address_space(entry),
1002						entry.val);
1003			if (page && !trylock_page(page)) {
1004				put_page(page);
1005				page = NULL;
1006			}
1007		}
1008		spin_unlock(&p->lock);
1009	}
1010	if (page) {
1011		/*
1012		 * Not mapped elsewhere, or swap space full? Free it!
1013		 * Also recheck PageSwapCache now page is locked (above).
1014		 */
1015		if (PageSwapCache(page) && !PageWriteback(page) &&
1016		    (!page_mapped(page) || mem_cgroup_swap_full(page))) {
1017			delete_from_swap_cache(page);
1018			SetPageDirty(page);
1019		}
1020		unlock_page(page);
1021		put_page(page);
1022	}
1023	return p != NULL;
1024}
1025
1026#ifdef CONFIG_HIBERNATION
1027/*
1028 * Find the swap type that corresponds to given device (if any).
1029 *
1030 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1031 * from 0, in which the swap header is expected to be located.
1032 *
1033 * This is needed for the suspend to disk (aka swsusp).
1034 */
1035int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1036{
1037	struct block_device *bdev = NULL;
1038	int type;
1039
1040	if (device)
1041		bdev = bdget(device);
1042
1043	spin_lock(&swap_lock);
1044	for (type = 0; type < nr_swapfiles; type++) {
1045		struct swap_info_struct *sis = swap_info[type];
1046
1047		if (!(sis->flags & SWP_WRITEOK))
1048			continue;
1049
1050		if (!bdev) {
1051			if (bdev_p)
1052				*bdev_p = bdgrab(sis->bdev);
1053
1054			spin_unlock(&swap_lock);
1055			return type;
1056		}
1057		if (bdev == sis->bdev) {
1058			struct swap_extent *se = &sis->first_swap_extent;
1059
1060			if (se->start_block == offset) {
1061				if (bdev_p)
1062					*bdev_p = bdgrab(sis->bdev);
1063
1064				spin_unlock(&swap_lock);
1065				bdput(bdev);
1066				return type;
1067			}
1068		}
1069	}
1070	spin_unlock(&swap_lock);
1071	if (bdev)
1072		bdput(bdev);
1073
1074	return -ENODEV;
1075}
1076
1077/*
1078 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1079 * corresponding to given index in swap_info (swap type).
1080 */
1081sector_t swapdev_block(int type, pgoff_t offset)
1082{
1083	struct block_device *bdev;
1084
1085	if ((unsigned int)type >= nr_swapfiles)
1086		return 0;
1087	if (!(swap_info[type]->flags & SWP_WRITEOK))
1088		return 0;
1089	return map_swap_entry(swp_entry(type, offset), &bdev);
1090}
1091
1092/*
1093 * Return either the total number of swap pages of given type, or the number
1094 * of free pages of that type (depending on @free)
1095 *
1096 * This is needed for software suspend
1097 */
1098unsigned int count_swap_pages(int type, int free)
1099{
1100	unsigned int n = 0;
1101
1102	spin_lock(&swap_lock);
1103	if ((unsigned int)type < nr_swapfiles) {
1104		struct swap_info_struct *sis = swap_info[type];
1105
1106		spin_lock(&sis->lock);
1107		if (sis->flags & SWP_WRITEOK) {
1108			n = sis->pages;
1109			if (free)
1110				n -= sis->inuse_pages;
1111		}
1112		spin_unlock(&sis->lock);
1113	}
1114	spin_unlock(&swap_lock);
1115	return n;
1116}
1117#endif /* CONFIG_HIBERNATION */
1118
1119static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1120{
1121	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
1122}
1123
1124/*
1125 * No need to decide whether this PTE shares the swap entry with others,
1126 * just let do_wp_page work it out if a write is requested later - to
1127 * force COW, vm_page_prot omits write permission from any private vma.
1128 */
1129static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1130		unsigned long addr, swp_entry_t entry, struct page *page)
1131{
1132	struct page *swapcache;
1133	struct mem_cgroup *memcg;
1134	spinlock_t *ptl;
1135	pte_t *pte;
1136	int ret = 1;
1137
1138	swapcache = page;
1139	page = ksm_might_need_to_copy(page, vma, addr);
1140	if (unlikely(!page))
1141		return -ENOMEM;
1142
1143	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
1144				&memcg, false)) {
1145		ret = -ENOMEM;
1146		goto out_nolock;
1147	}
1148
1149	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1150	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1151		mem_cgroup_cancel_charge(page, memcg, false);
1152		ret = 0;
1153		goto out;
1154	}
1155
1156	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1157	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1158	get_page(page);
1159	set_pte_at(vma->vm_mm, addr, pte,
1160		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1161	if (page == swapcache) {
1162		page_add_anon_rmap(page, vma, addr, false);
1163		mem_cgroup_commit_charge(page, memcg, true, false);
1164	} else { /* ksm created a completely new copy */
1165		page_add_new_anon_rmap(page, vma, addr, false);
1166		mem_cgroup_commit_charge(page, memcg, false, false);
1167		lru_cache_add_active_or_unevictable(page, vma);
1168	}
1169	swap_free(entry);
1170	/*
1171	 * Move the page to the active list so it is not
1172	 * immediately swapped out again after swapon.
1173	 */
1174	activate_page(page);
1175out:
1176	pte_unmap_unlock(pte, ptl);
1177out_nolock:
1178	if (page != swapcache) {
1179		unlock_page(page);
1180		put_page(page);
1181	}
1182	return ret;
1183}
1184
1185static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1186				unsigned long addr, unsigned long end,
1187				swp_entry_t entry, struct page *page)
1188{
1189	pte_t swp_pte = swp_entry_to_pte(entry);
1190	pte_t *pte;
1191	int ret = 0;
1192
1193	/*
1194	 * We don't actually need pte lock while scanning for swp_pte: since
1195	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
1196	 * page table while we're scanning; though it could get zapped, and on
1197	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
1198	 * of unmatched parts which look like swp_pte, so unuse_pte must
1199	 * recheck under pte lock.  Scanning without pte lock lets it be
1200	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
1201	 */
1202	pte = pte_offset_map(pmd, addr);
1203	do {
1204		/*
1205		 * swapoff spends a _lot_ of time in this loop!
1206		 * Test inline before going to call unuse_pte.
1207		 */
1208		if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
1209			pte_unmap(pte);
1210			ret = unuse_pte(vma, pmd, addr, entry, page);
1211			if (ret)
1212				goto out;
1213			pte = pte_offset_map(pmd, addr);
1214		}
1215	} while (pte++, addr += PAGE_SIZE, addr != end);
1216	pte_unmap(pte - 1);
1217out:
1218	return ret;
1219}
1220
1221static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1222				unsigned long addr, unsigned long end,
1223				swp_entry_t entry, struct page *page)
1224{
1225	pmd_t *pmd;
1226	unsigned long next;
1227	int ret;
1228
1229	pmd = pmd_offset(pud, addr);
1230	do {
1231		next = pmd_addr_end(addr, end);
1232		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1233			continue;
1234		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
1235		if (ret)
1236			return ret;
1237	} while (pmd++, addr = next, addr != end);
1238	return 0;
1239}
1240
1241static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
1242				unsigned long addr, unsigned long end,
1243				swp_entry_t entry, struct page *page)
1244{
1245	pud_t *pud;
1246	unsigned long next;
1247	int ret;
1248
1249	pud = pud_offset(pgd, addr);
1250	do {
1251		next = pud_addr_end(addr, end);
1252		if (pud_none_or_clear_bad(pud))
1253			continue;
1254		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
1255		if (ret)
1256			return ret;
1257	} while (pud++, addr = next, addr != end);
1258	return 0;
1259}
1260
1261static int unuse_vma(struct vm_area_struct *vma,
1262				swp_entry_t entry, struct page *page)
1263{
1264	pgd_t *pgd;
1265	unsigned long addr, end, next;
1266	int ret;
1267
1268	if (page_anon_vma(page)) {
1269		addr = page_address_in_vma(page, vma);
1270		if (addr == -EFAULT)
1271			return 0;
1272		else
1273			end = addr + PAGE_SIZE;
1274	} else {
1275		addr = vma->vm_start;
1276		end = vma->vm_end;
1277	}
1278
1279	pgd = pgd_offset(vma->vm_mm, addr);
1280	do {
1281		next = pgd_addr_end(addr, end);
1282		if (pgd_none_or_clear_bad(pgd))
1283			continue;
1284		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
1285		if (ret)
1286			return ret;
1287	} while (pgd++, addr = next, addr != end);
1288	return 0;
1289}
1290
1291static int unuse_mm(struct mm_struct *mm,
1292				swp_entry_t entry, struct page *page)
1293{
1294	struct vm_area_struct *vma;
1295	int ret = 0;
1296
1297	if (!down_read_trylock(&mm->mmap_sem)) {
1298		/*
1299		 * Activate page so shrink_inactive_list is unlikely to unmap
1300		 * its ptes while lock is dropped, so swapoff can make progress.
1301		 */
1302		activate_page(page);
1303		unlock_page(page);
1304		down_read(&mm->mmap_sem);
1305		lock_page(page);
1306	}
1307	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1308		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1309			break;
1310	}
1311	up_read(&mm->mmap_sem);
1312	return (ret < 0)? ret: 0;
1313}
1314
1315/*
1316 * Scan swap_map (or frontswap_map if frontswap parameter is true)
1317 * from current position to next entry still in use.
1318 * Recycle to start on reaching the end, returning 0 when empty.
1319 */
1320static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1321					unsigned int prev, bool frontswap)
1322{
1323	unsigned int max = si->max;
1324	unsigned int i = prev;
1325	unsigned char count;
1326
1327	/*
1328	 * No need for swap_lock here: we're just looking
1329	 * for whether an entry is in use, not modifying it; false
1330	 * hits are okay, and sys_swapoff() has already prevented new
1331	 * allocations from this area (while holding swap_lock).
1332	 */
1333	for (;;) {
1334		if (++i >= max) {
1335			if (!prev) {
1336				i = 0;
1337				break;
1338			}
1339			/*
1340			 * No entries in use at top of swap_map,
1341			 * loop back to start and recheck there.
1342			 */
1343			max = prev + 1;
1344			prev = 0;
1345			i = 1;
1346		}
1347		if (frontswap) {
1348			if (frontswap_test(si, i))
1349				break;
1350			else
1351				continue;
1352		}
1353		count = READ_ONCE(si->swap_map[i]);
1354		if (count && swap_count(count) != SWAP_MAP_BAD)
1355			break;
1356	}
1357	return i;
1358}
1359
1360/*
1361 * We completely avoid races by reading each swap page in advance,
1362 * and then search for the process using it.  All the necessary
1363 * page table adjustments can then be made atomically.
1364 *
1365 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1366 * pages_to_unuse==0 means all pages; ignored if frontswap is false
1367 */
1368int try_to_unuse(unsigned int type, bool frontswap,
1369		 unsigned long pages_to_unuse)
1370{
1371	struct swap_info_struct *si = swap_info[type];
1372	struct mm_struct *start_mm;
1373	volatile unsigned char *swap_map; /* swap_map is accessed without
1374					   * locking. Mark it as volatile
1375					   * to prevent compiler doing
1376					   * something odd.
1377					   */
1378	unsigned char swcount;
1379	struct page *page;
1380	swp_entry_t entry;
1381	unsigned int i = 0;
1382	int retval = 0;
1383
1384	/*
1385	 * When searching mms for an entry, a good strategy is to
1386	 * start at the first mm we freed the previous entry from
1387	 * (though actually we don't notice whether we or coincidence
1388	 * freed the entry).  Initialize this start_mm with a hold.
1389	 *
1390	 * A simpler strategy would be to start at the last mm we
1391	 * freed the previous entry from; but that would take less
1392	 * advantage of mmlist ordering, which clusters forked mms
1393	 * together, child after parent.  If we race with dup_mmap(), we
1394	 * prefer to resolve parent before child, lest we miss entries
1395	 * duplicated after we scanned child: using last mm would invert
1396	 * that.
1397	 */
1398	start_mm = &init_mm;
1399	atomic_inc(&init_mm.mm_users);
1400
1401	/*
1402	 * Keep on scanning until all entries have gone.  Usually,
1403	 * one pass through swap_map is enough, but not necessarily:
1404	 * there are races when an instance of an entry might be missed.
1405	 */
1406	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1407		if (signal_pending(current)) {
1408			retval = -EINTR;
1409			break;
1410		}
1411
1412		/*
1413		 * Get a page for the entry, using the existing swap
1414		 * cache page if there is one.  Otherwise, get a clean
1415		 * page and read the swap into it.
1416		 */
1417		swap_map = &si->swap_map[i];
1418		entry = swp_entry(type, i);
1419		page = read_swap_cache_async(entry,
1420					GFP_HIGHUSER_MOVABLE, NULL, 0);
1421		if (!page) {
1422			/*
1423			 * Either swap_duplicate() failed because entry
1424			 * has been freed independently, and will not be
1425			 * reused since sys_swapoff() already disabled
1426			 * allocation from here, or alloc_page() failed.
1427			 */
1428			swcount = *swap_map;
1429			/*
1430			 * We don't hold lock here, so the swap entry could be
1431			 * SWAP_MAP_BAD (when the cluster is discarding).
1432			 * Instead of fail out, We can just skip the swap
1433			 * entry because swapoff will wait for discarding
1434			 * finish anyway.
1435			 */
1436			if (!swcount || swcount == SWAP_MAP_BAD)
1437				continue;
1438			retval = -ENOMEM;
1439			break;
1440		}
1441
1442		/*
1443		 * Don't hold on to start_mm if it looks like exiting.
1444		 */
1445		if (atomic_read(&start_mm->mm_users) == 1) {
1446			mmput(start_mm);
1447			start_mm = &init_mm;
1448			atomic_inc(&init_mm.mm_users);
1449		}
1450
1451		/*
1452		 * Wait for and lock page.  When do_swap_page races with
1453		 * try_to_unuse, do_swap_page can handle the fault much
1454		 * faster than try_to_unuse can locate the entry.  This
1455		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1456		 * defer to do_swap_page in such a case - in some tests,
1457		 * do_swap_page and try_to_unuse repeatedly compete.
1458		 */
1459		wait_on_page_locked(page);
1460		wait_on_page_writeback(page);
1461		lock_page(page);
1462		wait_on_page_writeback(page);
1463
1464		/*
1465		 * Remove all references to entry.
1466		 */
1467		swcount = *swap_map;
1468		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1469			retval = shmem_unuse(entry, page);
1470			/* page has already been unlocked and released */
1471			if (retval < 0)
1472				break;
1473			continue;
1474		}
1475		if (swap_count(swcount) && start_mm != &init_mm)
1476			retval = unuse_mm(start_mm, entry, page);
1477
1478		if (swap_count(*swap_map)) {
1479			int set_start_mm = (*swap_map >= swcount);
1480			struct list_head *p = &start_mm->mmlist;
1481			struct mm_struct *new_start_mm = start_mm;
1482			struct mm_struct *prev_mm = start_mm;
1483			struct mm_struct *mm;
1484
1485			atomic_inc(&new_start_mm->mm_users);
1486			atomic_inc(&prev_mm->mm_users);
1487			spin_lock(&mmlist_lock);
1488			while (swap_count(*swap_map) && !retval &&
1489					(p = p->next) != &start_mm->mmlist) {
1490				mm = list_entry(p, struct mm_struct, mmlist);
1491				if (!atomic_inc_not_zero(&mm->mm_users))
1492					continue;
1493				spin_unlock(&mmlist_lock);
1494				mmput(prev_mm);
1495				prev_mm = mm;
1496
1497				cond_resched();
1498
1499				swcount = *swap_map;
1500				if (!swap_count(swcount)) /* any usage ? */
1501					;
1502				else if (mm == &init_mm)
1503					set_start_mm = 1;
1504				else
1505					retval = unuse_mm(mm, entry, page);
1506
1507				if (set_start_mm && *swap_map < swcount) {
1508					mmput(new_start_mm);
1509					atomic_inc(&mm->mm_users);
1510					new_start_mm = mm;
1511					set_start_mm = 0;
1512				}
1513				spin_lock(&mmlist_lock);
1514			}
1515			spin_unlock(&mmlist_lock);
1516			mmput(prev_mm);
1517			mmput(start_mm);
1518			start_mm = new_start_mm;
1519		}
1520		if (retval) {
1521			unlock_page(page);
1522			put_page(page);
1523			break;
1524		}
1525
1526		/*
1527		 * If a reference remains (rare), we would like to leave
1528		 * the page in the swap cache; but try_to_unmap could
1529		 * then re-duplicate the entry once we drop page lock,
1530		 * so we might loop indefinitely; also, that page could
1531		 * not be swapped out to other storage meanwhile.  So:
1532		 * delete from cache even if there's another reference,
1533		 * after ensuring that the data has been saved to disk -
1534		 * since if the reference remains (rarer), it will be
1535		 * read from disk into another page.  Splitting into two
1536		 * pages would be incorrect if swap supported "shared
1537		 * private" pages, but they are handled by tmpfs files.
1538		 *
1539		 * Given how unuse_vma() targets one particular offset
1540		 * in an anon_vma, once the anon_vma has been determined,
1541		 * this splitting happens to be just what is needed to
1542		 * handle where KSM pages have been swapped out: re-reading
1543		 * is unnecessarily slow, but we can fix that later on.
1544		 */
1545		if (swap_count(*swap_map) &&
1546		     PageDirty(page) && PageSwapCache(page)) {
1547			struct writeback_control wbc = {
1548				.sync_mode = WB_SYNC_NONE,
1549			};
1550
1551			swap_writepage(page, &wbc);
1552			lock_page(page);
1553			wait_on_page_writeback(page);
1554		}
1555
1556		/*
1557		 * It is conceivable that a racing task removed this page from
1558		 * swap cache just before we acquired the page lock at the top,
1559		 * or while we dropped it in unuse_mm().  The page might even
1560		 * be back in swap cache on another swap area: that we must not
1561		 * delete, since it may not have been written out to swap yet.
1562		 */
1563		if (PageSwapCache(page) &&
1564		    likely(page_private(page) == entry.val))
1565			delete_from_swap_cache(page);
1566
1567		/*
1568		 * So that we could skip searching mms once the swap count went
1569		 * to 1, we did not mark any present ptes as dirty: we must
1570		 * mark the page dirty now so shrink_page_list will preserve it.
1571		 */
1572		SetPageDirty(page);
1573		unlock_page(page);
1574		put_page(page);
1575
1576		/*
1577		 * Make sure that we aren't completely killing
1578		 * interactive performance.
1579		 */
1580		cond_resched();
1581		if (frontswap && pages_to_unuse > 0) {
1582			if (!--pages_to_unuse)
1583				break;
1584		}
1585	}
1586
1587	mmput(start_mm);
1588	return retval;
1589}
1590
1591/*
1592 * After a successful try_to_unuse, if no swap is now in use, we know
1593 * we can empty the mmlist.  swap_lock must be held on entry and exit.
1594 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1595 * added to the mmlist just after page_duplicate - before would be racy.
1596 */
1597static void drain_mmlist(void)
1598{
1599	struct list_head *p, *next;
1600	unsigned int type;
1601
1602	for (type = 0; type < nr_swapfiles; type++)
1603		if (swap_info[type]->inuse_pages)
1604			return;
1605	spin_lock(&mmlist_lock);
1606	list_for_each_safe(p, next, &init_mm.mmlist)
1607		list_del_init(p);
1608	spin_unlock(&mmlist_lock);
1609}
1610
1611/*
1612 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1613 * corresponds to page offset for the specified swap entry.
1614 * Note that the type of this function is sector_t, but it returns page offset
1615 * into the bdev, not sector offset.
1616 */
1617static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1618{
1619	struct swap_info_struct *sis;
1620	struct swap_extent *start_se;
1621	struct swap_extent *se;
1622	pgoff_t offset;
1623
1624	sis = swap_info[swp_type(entry)];
1625	*bdev = sis->bdev;
1626
1627	offset = swp_offset(entry);
1628	start_se = sis->curr_swap_extent;
1629	se = start_se;
1630
1631	for ( ; ; ) {
1632		if (se->start_page <= offset &&
1633				offset < (se->start_page + se->nr_pages)) {
1634			return se->start_block + (offset - se->start_page);
1635		}
1636		se = list_next_entry(se, list);
1637		sis->curr_swap_extent = se;
1638		BUG_ON(se == start_se);		/* It *must* be present */
1639	}
1640}
1641
1642/*
1643 * Returns the page offset into bdev for the specified page's swap entry.
1644 */
1645sector_t map_swap_page(struct page *page, struct block_device **bdev)
1646{
1647	swp_entry_t entry;
1648	entry.val = page_private(page);
1649	return map_swap_entry(entry, bdev);
1650}
1651
1652/*
1653 * Free all of a swapdev's extent information
1654 */
1655static void destroy_swap_extents(struct swap_info_struct *sis)
1656{
1657	while (!list_empty(&sis->first_swap_extent.list)) {
1658		struct swap_extent *se;
1659
1660		se = list_first_entry(&sis->first_swap_extent.list,
1661				struct swap_extent, list);
1662		list_del(&se->list);
1663		kfree(se);
1664	}
1665
1666	if (sis->flags & SWP_FILE) {
1667		struct file *swap_file = sis->swap_file;
1668		struct address_space *mapping = swap_file->f_mapping;
1669
1670		sis->flags &= ~SWP_FILE;
1671		mapping->a_ops->swap_deactivate(swap_file);
1672	}
1673}
1674
1675/*
1676 * Add a block range (and the corresponding page range) into this swapdev's
1677 * extent list.  The extent list is kept sorted in page order.
1678 *
1679 * This function rather assumes that it is called in ascending page order.
1680 */
1681int
1682add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1683		unsigned long nr_pages, sector_t start_block)
1684{
1685	struct swap_extent *se;
1686	struct swap_extent *new_se;
1687	struct list_head *lh;
1688
1689	if (start_page == 0) {
1690		se = &sis->first_swap_extent;
1691		sis->curr_swap_extent = se;
1692		se->start_page = 0;
1693		se->nr_pages = nr_pages;
1694		se->start_block = start_block;
1695		return 1;
1696	} else {
1697		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
1698		se = list_entry(lh, struct swap_extent, list);
1699		BUG_ON(se->start_page + se->nr_pages != start_page);
1700		if (se->start_block + se->nr_pages == start_block) {
1701			/* Merge it */
1702			se->nr_pages += nr_pages;
1703			return 0;
1704		}
1705	}
1706
1707	/*
1708	 * No merge.  Insert a new extent, preserving ordering.
1709	 */
1710	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1711	if (new_se == NULL)
1712		return -ENOMEM;
1713	new_se->start_page = start_page;
1714	new_se->nr_pages = nr_pages;
1715	new_se->start_block = start_block;
1716
1717	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1718	return 1;
1719}
1720
1721/*
1722 * A `swap extent' is a simple thing which maps a contiguous range of pages
1723 * onto a contiguous range of disk blocks.  An ordered list of swap extents
1724 * is built at swapon time and is then used at swap_writepage/swap_readpage
1725 * time for locating where on disk a page belongs.
1726 *
1727 * If the swapfile is an S_ISBLK block device, a single extent is installed.
1728 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1729 * swap files identically.
1730 *
1731 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1732 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1733 * swapfiles are handled *identically* after swapon time.
1734 *
1735 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1736 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1737 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1738 * requirements, they are simply tossed out - we will never use those blocks
1739 * for swapping.
1740 *
1741 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1742 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1743 * which will scribble on the fs.
1744 *
1745 * The amount of disk space which a single swap extent represents varies.
1746 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1747 * extents in the list.  To avoid much list walking, we cache the previous
1748 * search location in `curr_swap_extent', and start new searches from there.
1749 * This is extremely effective.  The average number of iterations in
1750 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1751 */
1752static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1753{
1754	struct file *swap_file = sis->swap_file;
1755	struct address_space *mapping = swap_file->f_mapping;
1756	struct inode *inode = mapping->host;
1757	int ret;
1758
1759	if (S_ISBLK(inode->i_mode)) {
1760		ret = add_swap_extent(sis, 0, sis->max, 0);
1761		*span = sis->pages;
1762		return ret;
1763	}
1764
1765	if (mapping->a_ops->swap_activate) {
1766		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
1767		if (!ret) {
1768			sis->flags |= SWP_FILE;
1769			ret = add_swap_extent(sis, 0, sis->max, 0);
1770			*span = sis->pages;
1771		}
1772		return ret;
1773	}
1774
1775	return generic_swapfile_activate(sis, swap_file, span);
1776}
1777
1778static void _enable_swap_info(struct swap_info_struct *p, int prio,
1779				unsigned char *swap_map,
1780				struct swap_cluster_info *cluster_info)
1781{
1782	if (prio >= 0)
1783		p->prio = prio;
1784	else
1785		p->prio = --least_priority;
1786	/*
1787	 * the plist prio is negated because plist ordering is
1788	 * low-to-high, while swap ordering is high-to-low
1789	 */
1790	p->list.prio = -p->prio;
1791	p->avail_list.prio = -p->prio;
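	/*
	 * For example, a swap area with prio 10 gets plist prio -10 and so
	 * sorts ahead of a prio 5 area (plist prio -5) in these low-to-high
	 * ordered plists.
	 */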
1792	p->swap_map = swap_map;
1793	p->cluster_info = cluster_info;
1794	p->flags |= SWP_WRITEOK;
1795	atomic_long_add(p->pages, &nr_swap_pages);
1796	total_swap_pages += p->pages;
1797
1798	assert_spin_locked(&swap_lock);
1799	/*
1800	 * both lists are plists, and thus priority ordered.
1801	 * swap_active_head needs to be priority ordered for swapoff(),
1802	 * which on removal of any swap_info_struct with an auto-assigned
1803	 * (i.e. negative) priority increments the auto-assigned priority
1804	 * of any lower-priority swap_info_structs.
1805	 * swap_avail_head needs to be priority ordered for get_swap_page(),
1806	 * which allocates swap pages from the highest available priority
1807	 * swap_info_struct.
1808	 */
1809	plist_add(&p->list, &swap_active_head);
1810	spin_lock(&swap_avail_lock);
1811	plist_add(&p->avail_list, &swap_avail_head);
1812	spin_unlock(&swap_avail_lock);
1813}
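
/*
 * Editorial note, not part of the original file: a plist keeps its nodes
 * sorted by ascending ->prio, so storing the *negated* swap priority (as
 * done above) makes plist_first() hand back the device the allocator
 * should try first.  With made-up priorities:
 *
 *	swap priority	stored plist prio	position in the plist
 *	      10		-10		head  (tried first)
 *	       5		 -5		middle
 *	      -2 (auto)		  2		tail  (tried last)
 */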
1814
1815static void enable_swap_info(struct swap_info_struct *p, int prio,
1816				unsigned char *swap_map,
1817				struct swap_cluster_info *cluster_info,
1818				unsigned long *frontswap_map)
1819{
1820	frontswap_init(p->type, frontswap_map);
1821	spin_lock(&swap_lock);
1822	spin_lock(&p->lock);
1823	_enable_swap_info(p, prio, swap_map, cluster_info);
1824	spin_unlock(&p->lock);
1825	spin_unlock(&swap_lock);
1826}
1827
1828static void reinsert_swap_info(struct swap_info_struct *p)
1829{
1830	spin_lock(&swap_lock);
1831	spin_lock(&p->lock);
1832	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
1833	spin_unlock(&p->lock);
1834	spin_unlock(&swap_lock);
1835}
1836
1837SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1838{
1839	struct swap_info_struct *p = NULL;
1840	unsigned char *swap_map;
1841	struct swap_cluster_info *cluster_info;
1842	unsigned long *frontswap_map;
1843	struct file *swap_file, *victim;
1844	struct address_space *mapping;
1845	struct inode *inode;
1846	struct filename *pathname;
1847	int err, found = 0;
1848	unsigned int old_block_size;
1849
1850	if (!capable(CAP_SYS_ADMIN))
1851		return -EPERM;
1852
1853	BUG_ON(!current->mm);
1854
1855	pathname = getname(specialfile);
1856	if (IS_ERR(pathname))
1857		return PTR_ERR(pathname);
1858
1859	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
1860	err = PTR_ERR(victim);
1861	if (IS_ERR(victim))
1862		goto out;
1863
1864	mapping = victim->f_mapping;
1865	spin_lock(&swap_lock);
1866	plist_for_each_entry(p, &swap_active_head, list) {
1867		if (p->flags & SWP_WRITEOK) {
1868			if (p->swap_file->f_mapping == mapping) {
1869				found = 1;
1870				break;
1871			}
1872		}
1873	}
1874	if (!found) {
1875		err = -EINVAL;
1876		spin_unlock(&swap_lock);
1877		goto out_dput;
1878	}
1879	if (!security_vm_enough_memory_mm(current->mm, p->pages))
1880		vm_unacct_memory(p->pages);
1881	else {
1882		err = -ENOMEM;
1883		spin_unlock(&swap_lock);
1884		goto out_dput;
1885	}
1886	spin_lock(&swap_avail_lock);
1887	plist_del(&p->avail_list, &swap_avail_head);
1888	spin_unlock(&swap_avail_lock);
1889	spin_lock(&p->lock);
1890	if (p->prio < 0) {
1891		struct swap_info_struct *si = p;
1892
1893		plist_for_each_entry_continue(si, &swap_active_head, list) {
1894			si->prio++;
1895			si->list.prio--;
1896			si->avail_list.prio--;
1897		}
1898		least_priority++;
1899	}
1900	plist_del(&p->list, &swap_active_head);
1901	atomic_long_sub(p->pages, &nr_swap_pages);
1902	total_swap_pages -= p->pages;
1903	p->flags &= ~SWP_WRITEOK;
1904	spin_unlock(&p->lock);
1905	spin_unlock(&swap_lock);
1906
1907	set_current_oom_origin();
1908	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
1909	clear_current_oom_origin();
1910
1911	if (err) {
1912		/* re-insert swap space into swap_active_head */
1913		reinsert_swap_info(p);
1914		goto out_dput;
1915	}
1916
1917	flush_work(&p->discard_work);
1918
1919	destroy_swap_extents(p);
1920	if (p->flags & SWP_CONTINUED)
1921		free_swap_count_continuations(p);
1922
1923	mutex_lock(&swapon_mutex);
1924	spin_lock(&swap_lock);
1925	spin_lock(&p->lock);
1926	drain_mmlist();
1927
1928	/* wait for anyone still in scan_swap_map */
1929	p->highest_bit = 0;		/* cuts scans short */
1930	while (p->flags >= SWP_SCANNING) {
1931		spin_unlock(&p->lock);
1932		spin_unlock(&swap_lock);
1933		schedule_timeout_uninterruptible(1);
1934		spin_lock(&swap_lock);
1935		spin_lock(&p->lock);
1936	}
1937
1938	swap_file = p->swap_file;
1939	old_block_size = p->old_block_size;
1940	p->swap_file = NULL;
1941	p->max = 0;
1942	swap_map = p->swap_map;
1943	p->swap_map = NULL;
1944	cluster_info = p->cluster_info;
1945	p->cluster_info = NULL;
1946	frontswap_map = frontswap_map_get(p);
1947	spin_unlock(&p->lock);
1948	spin_unlock(&swap_lock);
1949	frontswap_invalidate_area(p->type);
1950	frontswap_map_set(p, NULL);
1951	mutex_unlock(&swapon_mutex);
1952	free_percpu(p->percpu_cluster);
1953	p->percpu_cluster = NULL;
1954	vfree(swap_map);
1955	vfree(cluster_info);
1956	vfree(frontswap_map);
1957	/* Destroy swap account information */
1958	swap_cgroup_swapoff(p->type);
1959
1960	inode = mapping->host;
1961	if (S_ISBLK(inode->i_mode)) {
1962		struct block_device *bdev = I_BDEV(inode);
1963		set_blocksize(bdev, old_block_size);
1964		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1965	} else {
1966		inode_lock(inode);
1967		inode->i_flags &= ~S_SWAPFILE;
1968		inode_unlock(inode);
1969	}
1970	filp_close(swap_file, NULL);
1971
1972	/*
1973	 * Clear the SWP_USED flag after all resources are freed so that swapon
1974	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
1975	 * not hold p->lock after we cleared its SWP_WRITEOK.
1976	 */
1977	spin_lock(&swap_lock);
1978	p->flags = 0;
1979	spin_unlock(&swap_lock);
1980
1981	err = 0;
1982	atomic_inc(&proc_poll_event);
1983	wake_up_interruptible(&proc_poll_wait);
1984
1985out_dput:
1986	filp_close(victim, NULL);
1987out:
1988	putname(pathname);
1989	return err;
1990}
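
/*
 * Editorial sketch, not part of the original file: sys_swapoff() above is
 * normally reached via the libc wrapper declared in <sys/swap.h>.  A
 * minimal userspace caller (error handling reduced to perror()):
 *
 *	#include <stdio.h>
 *	#include <sys/swap.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (argc != 2) {
 *			fprintf(stderr, "usage: %s <swap-device-or-file>\n", argv[0]);
 *			return 1;
 *		}
 *		if (swapoff(argv[1]) != 0) {	// needs CAP_SYS_ADMIN
 *			perror("swapoff");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */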
1991
1992#ifdef CONFIG_PROC_FS
1993static unsigned swaps_poll(struct file *file, poll_table *wait)
1994{
1995	struct seq_file *seq = file->private_data;
1996
1997	poll_wait(file, &proc_poll_wait, wait);
1998
1999	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2000		seq->poll_event = atomic_read(&proc_poll_event);
2001		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
2002	}
2003
2004	return POLLIN | POLLRDNORM;
2005}
2006
2007/* iterator */
2008static void *swap_start(struct seq_file *swap, loff_t *pos)
2009{
2010	struct swap_info_struct *si;
2011	int type;
2012	loff_t l = *pos;
2013
2014	mutex_lock(&swapon_mutex);
2015
2016	if (!l)
2017		return SEQ_START_TOKEN;
2018
2019	for (type = 0; type < nr_swapfiles; type++) {
2020		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2021		si = swap_info[type];
2022		if (!(si->flags & SWP_USED) || !si->swap_map)
2023			continue;
2024		if (!--l)
2025			return si;
2026	}
2027
2028	return NULL;
2029}
2030
2031static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2032{
2033	struct swap_info_struct *si = v;
2034	int type;
2035
2036	if (v == SEQ_START_TOKEN)
2037		type = 0;
2038	else
2039		type = si->type + 1;
2040
2041	for (; type < nr_swapfiles; type++) {
2042		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2043		si = swap_info[type];
2044		if (!(si->flags & SWP_USED) || !si->swap_map)
2045			continue;
2046		++*pos;
2047		return si;
2048	}
2049
2050	return NULL;
2051}
2052
2053static void swap_stop(struct seq_file *swap, void *v)
2054{
2055	mutex_unlock(&swapon_mutex);
2056}
2057
2058static int swap_show(struct seq_file *swap, void *v)
2059{
2060	struct swap_info_struct *si = v;
2061	struct file *file;
2062	int len;
2063
2064	if (si == SEQ_START_TOKEN) {
2065		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2066		return 0;
2067	}
2068
2069	file = si->swap_file;
2070	len = seq_file_path(swap, file, " \t\n\\");
2071	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2072			len < 40 ? 40 - len : 1, " ",
2073			S_ISBLK(file_inode(file)->i_mode) ?
2074				"partition" : "file\t",
2075			si->pages << (PAGE_SHIFT - 10),
2076			si->inuse_pages << (PAGE_SHIFT - 10),
2077			si->prio);
2078	return 0;
2079}
2080
2081static const struct seq_operations swaps_op = {
2082	.start =	swap_start,
2083	.next =		swap_next,
2084	.stop =		swap_stop,
2085	.show =		swap_show
2086};
2087
2088static int swaps_open(struct inode *inode, struct file *file)
2089{
2090	struct seq_file *seq;
2091	int ret;
2092
2093	ret = seq_open(file, &swaps_op);
2094	if (ret)
2095		return ret;
2096
2097	seq = file->private_data;
2098	seq->poll_event = atomic_read(&proc_poll_event);
2099	return 0;
2100}
2101
2102static const struct file_operations proc_swaps_operations = {
2103	.open		= swaps_open,
2104	.read		= seq_read,
2105	.llseek		= seq_lseek,
2106	.release	= seq_release,
2107	.poll		= swaps_poll,
2108};
2109
2110static int __init procswaps_init(void)
2111{
2112	proc_create("swaps", 0, NULL, &proc_swaps_operations);
2113	return 0;
2114}
2115__initcall(procswaps_init);
2116#endif /* CONFIG_PROC_FS */
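
/*
 * Editorial sketch, not part of the original file: swaps_poll() above lets
 * userspace watch for swapon/swapoff events by polling /proc/swaps.  The
 * file is always readable; a change is signalled as POLLERR|POLLPRI, after
 * which the table should be re-read from offset 0.  A hedged watcher:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd = { .events = POLLERR | POLLPRI };
 *		ssize_t n;
 *
 *		pfd.fd = open("/proc/swaps", O_RDONLY);
 *		if (pfd.fd < 0)
 *			return 1;
 *		for (;;) {
 *			while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
 *				fwrite(buf, 1, n, stdout);	// dump current table
 *			if (poll(&pfd, 1, -1) < 0)		// sleep until a change
 *				break;
 *			lseek(pfd.fd, 0, SEEK_SET);
 *		}
 *		return 0;
 *	}
 */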
2117
2118#ifdef MAX_SWAPFILES_CHECK
2119static int __init max_swapfiles_check(void)
2120{
2121	MAX_SWAPFILES_CHECK();
2122	return 0;
2123}
2124late_initcall(max_swapfiles_check);
2125#endif
2126
2127static struct swap_info_struct *alloc_swap_info(void)
2128{
2129	struct swap_info_struct *p;
2130	unsigned int type;
2131
2132	p = kzalloc(sizeof(*p), GFP_KERNEL);
2133	if (!p)
2134		return ERR_PTR(-ENOMEM);
2135
2136	spin_lock(&swap_lock);
2137	for (type = 0; type < nr_swapfiles; type++) {
2138		if (!(swap_info[type]->flags & SWP_USED))
2139			break;
2140	}
2141	if (type >= MAX_SWAPFILES) {
2142		spin_unlock(&swap_lock);
2143		kfree(p);
2144		return ERR_PTR(-EPERM);
2145	}
2146	if (type >= nr_swapfiles) {
2147		p->type = type;
2148		swap_info[type] = p;
2149		/*
2150		 * Write swap_info[type] before nr_swapfiles, in case a
2151		 * racing procfs swap_start() or swap_next() is reading them.
2152		 * (We never shrink nr_swapfiles, we never free this entry.)
2153		 */
2154		smp_wmb();
2155		nr_swapfiles++;
2156	} else {
2157		kfree(p);
2158		p = swap_info[type];
2159		/*
2160		 * Do not memset this entry: a racing procfs swap_next()
2161		 * would be relying on p->type to remain valid.
2162		 */
2163	}
2164	INIT_LIST_HEAD(&p->first_swap_extent.list);
2165	plist_node_init(&p->list, 0);
2166	plist_node_init(&p->avail_list, 0);
2167	p->flags = SWP_USED;
2168	spin_unlock(&swap_lock);
2169	spin_lock_init(&p->lock);
2170
2171	return p;
2172}
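
/*
 * Editorial note, not part of the original file: the smp_wmb() above pairs
 * with the smp_rmb() in swap_start()/swap_next().  Stripped to its bones:
 *
 *	writer (alloc_swap_info)	reader (swap_start/swap_next)
 *	swap_info[type] = p;		for (type = 0; type < nr_swapfiles; type++) {
 *	smp_wmb();				smp_rmb();
 *	nr_swapfiles++;				si = swap_info[type];
 *
 * Once the reader observes the incremented nr_swapfiles, the barrier pair
 * guarantees that the swap_info[] slot it guards is already visible.
 */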
2173
2174static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2175{
2176	int error;
2177
2178	if (S_ISBLK(inode->i_mode)) {
2179		p->bdev = bdgrab(I_BDEV(inode));
2180		error = blkdev_get(p->bdev,
2181				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2182		if (error < 0) {
2183			p->bdev = NULL;
2184			return error;
2185		}
2186		p->old_block_size = block_size(p->bdev);
2187		error = set_blocksize(p->bdev, PAGE_SIZE);
2188		if (error < 0)
2189			return error;
2190		p->flags |= SWP_BLKDEV;
2191	} else if (S_ISREG(inode->i_mode)) {
2192		p->bdev = inode->i_sb->s_bdev;
2193		inode_lock(inode);
2194		if (IS_SWAPFILE(inode))
2195			return -EBUSY;
2196	} else
2197		return -EINVAL;
2198
2199	return 0;
2200}
2201
2202static unsigned long read_swap_header(struct swap_info_struct *p,
2203					union swap_header *swap_header,
2204					struct inode *inode)
2205{
2206	int i;
2207	unsigned long maxpages;
2208	unsigned long swapfilepages;
2209	unsigned long last_page;
2210
2211	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2212		pr_err("Unable to find swap-space signature\n");
2213		return 0;
2214	}
2215
2216	/* swap partition endianness hack... */
2217	if (swab32(swap_header->info.version) == 1) {
2218		swab32s(&swap_header->info.version);
2219		swab32s(&swap_header->info.last_page);
2220		swab32s(&swap_header->info.nr_badpages);
2221		for (i = 0; i < swap_header->info.nr_badpages; i++)
2222			swab32s(&swap_header->info.badpages[i]);
2223	}
2224	/* Check the swap header's sub-version */
2225	if (swap_header->info.version != 1) {
2226		pr_warn("Unable to handle swap header version %d\n",
2227			swap_header->info.version);
2228		return 0;
2229	}
2230
2231	p->lowest_bit  = 1;
2232	p->cluster_next = 1;
2233	p->cluster_nr = 0;
2234
2235	/*
2236	 * Find out how many pages are allowed for a single swap
2237	 * device. There are two limiting factors: 1) the number
2238	 * of bits for the swap offset in the swp_entry_t type, and
2239	 * 2) the number of bits in the swap pte as defined by the
2240	 * different architectures. In order to find the
2241	 * largest possible bit mask, a swap entry with swap type 0
2242	 * and swap offset ~0UL is created, encoded to a swap pte,
2243	 * decoded to a swp_entry_t again, and finally the swap
2244	 * offset is extracted. This will mask all the bits from
2245	 * the initial ~0UL mask that can't be encoded in either
2246	 * the swp_entry_t or the architecture definition of a
2247	 * swap pte.
2248	 */
2249	maxpages = swp_offset(pte_to_swp_entry(
2250			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2251	last_page = swap_header->info.last_page;
2252	if (last_page > maxpages) {
2253		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2254			maxpages << (PAGE_SHIFT - 10),
2255			last_page << (PAGE_SHIFT - 10));
2256	}
2257	if (maxpages > last_page) {
2258		maxpages = last_page + 1;
2259		/* p->max is an unsigned int: don't overflow it */
2260		if ((unsigned int)maxpages == 0)
2261			maxpages = UINT_MAX;
2262	}
2263	p->highest_bit = maxpages - 1;
2264
2265	if (!maxpages)
2266		return 0;
2267	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2268	if (swapfilepages && maxpages > swapfilepages) {
2269		pr_warn("Swap area shorter than signature indicates\n");
2270		return 0;
2271	}
2272	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2273		return 0;
2274	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2275		return 0;
2276
2277	return maxpages;
2278}
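
/*
 * Editorial example, not part of the original file, with made-up numbers:
 * suppose the architecture's swap pte can encode a 32-bit swap offset.
 * Then swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))))
 * comes back as 0xffffffff and maxpages starts out at 0x100000000.  For a
 * 4GiB swap area with 4KiB pages the header's last_page is 1048575, so
 * maxpages is clamped to last_page + 1 = 1048576 and highest_bit becomes
 * 1048575.  Had last_page exceeded the encodable maximum instead, the
 * "Truncating oversized swap area" warning above would have fired.
 */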
2279
2280static int setup_swap_map_and_extents(struct swap_info_struct *p,
2281					union swap_header *swap_header,
2282					unsigned char *swap_map,
2283					struct swap_cluster_info *cluster_info,
2284					unsigned long maxpages,
2285					sector_t *span)
2286{
2287	int i;
2288	unsigned int nr_good_pages;
2289	int nr_extents;
2290	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2291	unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
2292
2293	nr_good_pages = maxpages - 1;	/* omit header page */
2294
2295	cluster_set_null(&p->free_cluster_head);
2296	cluster_set_null(&p->free_cluster_tail);
2297	cluster_set_null(&p->discard_cluster_head);
2298	cluster_set_null(&p->discard_cluster_tail);
2299
2300	for (i = 0; i < swap_header->info.nr_badpages; i++) {
2301		unsigned int page_nr = swap_header->info.badpages[i];
2302		if (page_nr == 0 || page_nr > swap_header->info.last_page)
2303			return -EINVAL;
2304		if (page_nr < maxpages) {
2305			swap_map[page_nr] = SWAP_MAP_BAD;
2306			nr_good_pages--;
2307			/*
2308			 * Haven't marked the cluster free yet, no list
2309			 * operation involved
2310			 */
2311			inc_cluster_info_page(p, cluster_info, page_nr);
2312		}
2313	}
2314
2315	/* Haven't marked the cluster free yet, no list operation involved */
2316	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2317		inc_cluster_info_page(p, cluster_info, i);
2318
2319	if (nr_good_pages) {
2320		swap_map[0] = SWAP_MAP_BAD;
2321		/*
2322		 * Haven't marked the cluster free yet, no list
2323		 * operation involved
2324		 */
2325		inc_cluster_info_page(p, cluster_info, 0);
2326		p->max = maxpages;
2327		p->pages = nr_good_pages;
2328		nr_extents = setup_swap_extents(p, span);
2329		if (nr_extents < 0)
2330			return nr_extents;
2331		nr_good_pages = p->pages;
2332	}
2333	if (!nr_good_pages) {
2334		pr_warn("Empty swap-file\n");
2335		return -EINVAL;
2336	}
2337
2338	if (!cluster_info)
2339		return nr_extents;
2340
2341	for (i = 0; i < nr_clusters; i++) {
2342		if (!cluster_count(&cluster_info[idx])) {
2343			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
2344			if (cluster_is_null(&p->free_cluster_head)) {
2345				cluster_set_next_flag(&p->free_cluster_head,
2346								idx, 0);
2347				cluster_set_next_flag(&p->free_cluster_tail,
2348								idx, 0);
2349			} else {
2350				unsigned int tail;
2351
2352				tail = cluster_next(&p->free_cluster_tail);
2353				cluster_set_next(&cluster_info[tail], idx);
2354				cluster_set_next_flag(&p->free_cluster_tail,
2355								idx, 0);
2356			}
2357		}
2358		idx++;
2359		if (idx == nr_clusters)
2360			idx = 0;
2361	}
2362	return nr_extents;
2363}
2364
2365/*
2366 * Helper to sys_swapon, determining whether a given swap
2367 * backing device's request queue supports DISCARD operations.
2368 */
2369static bool swap_discardable(struct swap_info_struct *si)
2370{
2371	struct request_queue *q = bdev_get_queue(si->bdev);
2372
2373	if (!q || !blk_queue_discard(q))
2374		return false;
2375
2376	return true;
2377}
2378
2379SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2380{
2381	struct swap_info_struct *p;
2382	struct filename *name;
2383	struct file *swap_file = NULL;
2384	struct address_space *mapping;
2385	int prio;
2386	int error;
2387	union swap_header *swap_header;
2388	int nr_extents;
2389	sector_t span;
2390	unsigned long maxpages;
2391	unsigned char *swap_map = NULL;
2392	struct swap_cluster_info *cluster_info = NULL;
2393	unsigned long *frontswap_map = NULL;
2394	struct page *page = NULL;
2395	struct inode *inode = NULL;
2396
2397	if (swap_flags & ~SWAP_FLAGS_VALID)
2398		return -EINVAL;
2399
2400	if (!capable(CAP_SYS_ADMIN))
2401		return -EPERM;
2402
2403	p = alloc_swap_info();
2404	if (IS_ERR(p))
2405		return PTR_ERR(p);
2406
2407	INIT_WORK(&p->discard_work, swap_discard_work);
2408
2409	name = getname(specialfile);
2410	if (IS_ERR(name)) {
2411		error = PTR_ERR(name);
2412		name = NULL;
2413		goto bad_swap;
2414	}
2415	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
2416	if (IS_ERR(swap_file)) {
2417		error = PTR_ERR(swap_file);
2418		swap_file = NULL;
2419		goto bad_swap;
2420	}
2421
2422	p->swap_file = swap_file;
2423	mapping = swap_file->f_mapping;
2424	inode = mapping->host;
2425
2426	/* If S_ISREG(inode->i_mode), claim_swapfile() will do inode_lock(inode). */
2427	error = claim_swapfile(p, inode);
2428	if (unlikely(error))
2429		goto bad_swap;
2430
2431	/*
2432	 * Read the swap header.
2433	 */
2434	if (!mapping->a_ops->readpage) {
2435		error = -EINVAL;
2436		goto bad_swap;
2437	}
2438	page = read_mapping_page(mapping, 0, swap_file);
2439	if (IS_ERR(page)) {
2440		error = PTR_ERR(page);
2441		goto bad_swap;
2442	}
2443	swap_header = kmap(page);
2444
2445	maxpages = read_swap_header(p, swap_header, inode);
2446	if (unlikely(!maxpages)) {
2447		error = -EINVAL;
2448		goto bad_swap;
2449	}
2450
2451	/* OK, set up the swap map and apply the bad block list */
2452	swap_map = vzalloc(maxpages);
2453	if (!swap_map) {
2454		error = -ENOMEM;
2455		goto bad_swap;
2456	}
2457	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2458		int cpu;
2459
2460		p->flags |= SWP_SOLIDSTATE;
2461		/*
2462		 * select a random position to start with to help with
2463		 * wear leveling of the SSD
2464		 */
2465		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
2466
2467		cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
2468			SWAPFILE_CLUSTER) * sizeof(*cluster_info));
2469		if (!cluster_info) {
2470			error = -ENOMEM;
2471			goto bad_swap;
2472		}
2473		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
2474		if (!p->percpu_cluster) {
2475			error = -ENOMEM;
2476			goto bad_swap;
2477		}
2478		for_each_possible_cpu(cpu) {
2479			struct percpu_cluster *cluster;
2480			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
2481			cluster_set_null(&cluster->index);
2482		}
2483	}
2484
2485	error = swap_cgroup_swapon(p->type, maxpages);
2486	if (error)
2487		goto bad_swap;
2488
2489	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2490		cluster_info, maxpages, &span);
2491	if (unlikely(nr_extents < 0)) {
2492		error = nr_extents;
2493		goto bad_swap;
2494	}
2495	/* frontswap enabled? set up bit-per-page map for frontswap */
2496	if (frontswap_enabled)
2497		frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
2498
2499	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
2500		/*
2501		 * When discard is enabled for swap with no particular
2502		 * policy flagged, we set all swap discard flags here in
2503		 * order to sustain backward compatibility with older
2504		 * swapon(8) releases.
2505		 */
2506		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
2507			     SWP_PAGE_DISCARD);
2508
2509		/*
2510		 * By passing flags to sys_swapon, a sysadmin can tell us to
2511		 * either do single-time area discards only, or to just
2512		 * perform discards for released swap page-clusters.
2513		 * Now it's time to adjust the p->flags accordingly.
2514		 */
2515		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
2516			p->flags &= ~SWP_PAGE_DISCARD;
2517		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
2518			p->flags &= ~SWP_AREA_DISCARD;
2519
2520		/* issue a swapon-time discard if it's still required */
2521		if (p->flags & SWP_AREA_DISCARD) {
2522			int err = discard_swap(p);
2523			if (unlikely(err))
2524				pr_err("swapon: discard_swap(%p): %d\n",
2525					p, err);
2526		}
2527	}
2528
2529	mutex_lock(&swapon_mutex);
2530	prio = -1;
2531	if (swap_flags & SWAP_FLAG_PREFER)
2532		prio =
2533		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2534	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
2535
2536	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
2537		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
2538		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2539		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2540		(p->flags & SWP_DISCARDABLE) ? "D" : "",
2541		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
2542		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
2543		(frontswap_map) ? "FS" : "");
2544
2545	mutex_unlock(&swapon_mutex);
2546	atomic_inc(&proc_poll_event);
2547	wake_up_interruptible(&proc_poll_wait);
2548
2549	if (S_ISREG(inode->i_mode))
2550		inode->i_flags |= S_SWAPFILE;
2551	error = 0;
2552	goto out;
2553bad_swap:
2554	free_percpu(p->percpu_cluster);
2555	p->percpu_cluster = NULL;
2556	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2557		set_blocksize(p->bdev, p->old_block_size);
2558		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2559	}
2560	destroy_swap_extents(p);
2561	swap_cgroup_swapoff(p->type);
2562	spin_lock(&swap_lock);
2563	p->swap_file = NULL;
2564	p->flags = 0;
2565	spin_unlock(&swap_lock);
2566	vfree(swap_map);
2567	vfree(cluster_info);
2568	if (swap_file) {
2569		if (inode && S_ISREG(inode->i_mode)) {
2570			inode_unlock(inode);
2571			inode = NULL;
2572		}
2573		filp_close(swap_file, NULL);
2574	}
2575out:
2576	if (page && !IS_ERR(page)) {
2577		kunmap(page);
2578		put_page(page);
2579	}
2580	if (name)
2581		putname(name);
2582	if (inode && S_ISREG(inode->i_mode))
2583		inode_unlock(inode);
2584	return error;
2585}
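
/*
 * Editorial sketch, not part of the original file: how the flags decoded
 * above are composed by a userspace caller.  SWAP_FLAG_PREFER requests an
 * explicit priority in the low bits; without it the kernel assigns the
 * next negative priority, as seen in _enable_swap_info().
 *
 *	#include <stdio.h>
 *	#include <sys/swap.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		int prio = 10;		// made-up priority for the example
 *		int flags = SWAP_FLAG_PREFER |
 *			    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
 *
 *		if (argc != 2) {
 *			fprintf(stderr, "usage: %s <swap-device-or-file>\n", argv[0]);
 *			return 1;
 *		}
 *		if (swapon(argv[1], flags) != 0) {	// needs CAP_SYS_ADMIN
 *			perror("swapon");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */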
2586
2587void si_swapinfo(struct sysinfo *val)
2588{
2589	unsigned int type;
2590	unsigned long nr_to_be_unused = 0;
2591
2592	spin_lock(&swap_lock);
2593	for (type = 0; type < nr_swapfiles; type++) {
2594		struct swap_info_struct *si = swap_info[type];
2595
2596		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2597			nr_to_be_unused += si->inuse_pages;
2598	}
2599	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
2600	val->totalswap = total_swap_pages + nr_to_be_unused;
2601	spin_unlock(&swap_lock);
2602}
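
/*
 * Editorial sketch, not part of the original file: the values filled in by
 * si_swapinfo() surface in userspace through sysinfo(2) (and /proc/meminfo).
 * A minimal reader:
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) != 0)
 *			return 1;
 *		printf("swap total %llu bytes, free %llu bytes\n",
 *		       (unsigned long long)si.totalswap * si.mem_unit,
 *		       (unsigned long long)si.freeswap * si.mem_unit);
 *		return 0;
 *	}
 */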
2603
2604/*
2605 * Verify that a swap entry is valid and increment its swap map count.
2606 *
2607 * Return values:
2608 * - success -> 0
2609 * - swp_entry is invalid -> EINVAL
2610 * - swp_entry is migration entry -> EINVAL
2611 * - swap-cache reference is requested but there is already one. -> EEXIST
2612 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2613 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2614 */
2615static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2616{
2617	struct swap_info_struct *p;
2618	unsigned long offset, type;
2619	unsigned char count;
2620	unsigned char has_cache;
2621	int err = -EINVAL;
2622
2623	if (non_swap_entry(entry))
2624		goto out;
2625
2626	type = swp_type(entry);
2627	if (type >= nr_swapfiles)
2628		goto bad_file;
2629	p = swap_info[type];
2630	offset = swp_offset(entry);
2631
2632	spin_lock(&p->lock);
2633	if (unlikely(offset >= p->max))
2634		goto unlock_out;
2635
2636	count = p->swap_map[offset];
2637
2638	/*
2639	 * swapin_readahead() doesn't check if a swap entry is valid, so the
2640	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
2641	 */
2642	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
2643		err = -ENOENT;
2644		goto unlock_out;
2645	}
2646
2647	has_cache = count & SWAP_HAS_CACHE;
2648	count &= ~SWAP_HAS_CACHE;
2649	err = 0;
2650
2651	if (usage == SWAP_HAS_CACHE) {
2652
2653		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
2654		if (!has_cache && count)
2655			has_cache = SWAP_HAS_CACHE;
2656		else if (has_cache)		/* someone else added cache */
2657			err = -EEXIST;
2658		else				/* no users remaining */
2659			err = -ENOENT;
2660
2661	} else if (count || has_cache) {
2662
2663		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2664			count += usage;
2665		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2666			err = -EINVAL;
2667		else if (swap_count_continued(p, offset, count))
2668			count = COUNT_CONTINUED;
2669		else
2670			err = -ENOMEM;
2671	} else
2672		err = -ENOENT;			/* unused swap entry */
2673
2674	p->swap_map[offset] = count | has_cache;
2675
2676unlock_out:
2677	spin_unlock(&p->lock);
2678out:
2679	return err;
2680
2681bad_file:
2682	pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
2683	goto out;
2684}
2685
2686/*
2687 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2688 * (in which case its reference count is never incremented).
2689 */
2690void swap_shmem_alloc(swp_entry_t entry)
2691{
2692	__swap_duplicate(entry, SWAP_MAP_SHMEM);
2693}
2694
2695/*
2696 * Increase reference count of swap entry by 1.
2697 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2698 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
2699 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2700 * might occur if a page table entry has got corrupted.
2701 */
2702int swap_duplicate(swp_entry_t entry)
2703{
2704	int err = 0;
2705
2706	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2707		err = add_swap_count_continuation(entry, GFP_ATOMIC);
2708	return err;
2709}
2710
2711/*
2712 * @entry: swap entry for which we allocate swap cache.
2713 *
2714 * Called when allocating swap cache for an existing swap entry.
2715 * Returns 0 on success, or an error code: -EEXIST means a swap
2716 * cache already exists (see __swap_duplicate() above).
2717 * Note: the return code differs from swap_duplicate().
2718 */
2719int swapcache_prepare(swp_entry_t entry)
2720{
2721	return __swap_duplicate(entry, SWAP_HAS_CACHE);
2722}
2723
2724struct swap_info_struct *page_swap_info(struct page *page)
2725{
2726	swp_entry_t swap = { .val = page_private(page) };
2727	BUG_ON(!PageSwapCache(page));
2728	return swap_info[swp_type(swap)];
2729}
2730
2731/*
2732 * out-of-line __page_file_ methods to avoid include hell.
2733 */
2734struct address_space *__page_file_mapping(struct page *page)
2735{
2736	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2737	return page_swap_info(page)->swap_file->f_mapping;
2738}
2739EXPORT_SYMBOL_GPL(__page_file_mapping);
2740
2741pgoff_t __page_file_index(struct page *page)
2742{
2743	swp_entry_t swap = { .val = page_private(page) };
2744	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2745	return swp_offset(swap);
2746}
2747EXPORT_SYMBOL_GPL(__page_file_index);
2748
2749/*
2750 * add_swap_count_continuation - called when a swap count is duplicated
2751 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2752 * page of the original vmalloc'ed swap_map, to hold the continuation count
2753 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2754 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2755 *
2756 * These continuation pages are seldom referenced: the common paths all work
2757 * on the original swap_map, only referring to a continuation page when the
2758 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2759 *
2760 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2761 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2762 * can be called after dropping locks.
2763 */
2764int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2765{
2766	struct swap_info_struct *si;
2767	struct page *head;
2768	struct page *page;
2769	struct page *list_page;
2770	pgoff_t offset;
2771	unsigned char count;
2772
2773	/*
2774	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2775	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2776	 */
2777	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2778
2779	si = swap_info_get(entry);
2780	if (!si) {
2781		/*
2782		 * An acceptable race has occurred since the failing
2783		 * __swap_duplicate(): the swap entry has been freed,
2784		 * perhaps even the whole swap_map cleared for swapoff.
2785		 */
2786		goto outer;
2787	}
2788
2789	offset = swp_offset(entry);
2790	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2791
2792	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2793		/*
2794		 * The higher the swap count, the more likely it is that tasks
2795		 * will race to add swap count continuation: we need to avoid
2796		 * over-provisioning.
2797		 */
2798		goto out;
2799	}
2800
2801	if (!page) {
2802		spin_unlock(&si->lock);
2803		return -ENOMEM;
2804	}
2805
2806	/*
2807	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2808	 * no architecture is using highmem pages for kernel page tables: so it
2809	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
2810	 */
2811	head = vmalloc_to_page(si->swap_map + offset);
2812	offset &= ~PAGE_MASK;
2813
2814	/*
2815	 * Page allocation does not initialize the page's lru field,
2816	 * but it does always reset its private field.
2817	 */
2818	if (!page_private(head)) {
2819		BUG_ON(count & COUNT_CONTINUED);
2820		INIT_LIST_HEAD(&head->lru);
2821		set_page_private(head, SWP_CONTINUED);
2822		si->flags |= SWP_CONTINUED;
2823	}
2824
2825	list_for_each_entry(list_page, &head->lru, lru) {
2826		unsigned char *map;
2827
2828		/*
2829		 * If the previous map said no continuation, but we've found
2830		 * a continuation page, free our allocation and use this one.
2831		 */
2832		if (!(count & COUNT_CONTINUED))
2833			goto out;
2834
2835		map = kmap_atomic(list_page) + offset;
2836		count = *map;
2837		kunmap_atomic(map);
2838
2839		/*
2840		 * If this continuation count now has some space in it,
2841		 * free our allocation and use this one.
2842		 */
2843		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2844			goto out;
2845	}
2846
2847	list_add_tail(&page->lru, &head->lru);
2848	page = NULL;			/* now it's attached, don't free it */
2849out:
2850	spin_unlock(&si->lock);
2851outer:
2852	if (page)
2853		__free_page(page);
2854	return 0;
2855}
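
/*
 * Editorial sketch, not part of the original file: the two-step calling
 * convention described in the comment above, loosely modelled on the fork
 * path.  Under the page table lock only GFP_ATOMIC is permissible; on
 * -ENOMEM the caller drops the lock and retries the continuation with
 * GFP_KERNEL.  The name failed_entry is illustrative.
 *
 *	// with the pte lock held:
 *	if (swap_duplicate(entry) == -ENOMEM)
 *		failed_entry = entry;		// retry below, outside the lock
 *	...
 *	// after dropping the pte lock:
 *	if (failed_entry.val &&
 *	    add_swap_count_continuation(failed_entry, GFP_KERNEL) < 0)
 *		return -ENOMEM;
 */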
2856
2857/*
2858 * swap_count_continued - when the original swap_map count is incremented
2859 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2860 * into, carry if so, or else fail until a new continuation page is allocated;
2861 * when the original swap_map count is decremented from 0 with continuation,
2862 * borrow from the continuation and report whether it still holds more.
2863 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2864 */
2865static bool swap_count_continued(struct swap_info_struct *si,
2866				 pgoff_t offset, unsigned char count)
2867{
2868	struct page *head;
2869	struct page *page;
2870	unsigned char *map;
2871
2872	head = vmalloc_to_page(si->swap_map + offset);
2873	if (page_private(head) != SWP_CONTINUED) {
2874		BUG_ON(count & COUNT_CONTINUED);
2875		return false;		/* need to add count continuation */
2876	}
2877
2878	offset &= ~PAGE_MASK;
2879	page = list_entry(head->lru.next, struct page, lru);
2880	map = kmap_atomic(page) + offset;
2881
2882	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
2883		goto init_map;		/* jump over SWAP_CONT_MAX checks */
2884
2885	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2886		/*
2887		 * Think of how you add 1 to 999
2888		 */
2889		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2890			kunmap_atomic(map);
2891			page = list_entry(page->lru.next, struct page, lru);
2892			BUG_ON(page == head);
2893			map = kmap_atomic(page) + offset;
2894		}
2895		if (*map == SWAP_CONT_MAX) {
2896			kunmap_atomic(map);
2897			page = list_entry(page->lru.next, struct page, lru);
2898			if (page == head)
2899				return false;	/* add count continuation */
2900			map = kmap_atomic(page) + offset;
2901init_map:		*map = 0;		/* we didn't zero the page */
2902		}
2903		*map += 1;
2904		kunmap_atomic(map);
2905		page = list_entry(page->lru.prev, struct page, lru);
2906		while (page != head) {
2907			map = kmap_atomic(page) + offset;
2908			*map = COUNT_CONTINUED;
2909			kunmap_atomic(map);
2910			page = list_entry(page->lru.prev, struct page, lru);
2911		}
2912		return true;			/* incremented */
2913
2914	} else {				/* decrementing */
2915		/*
2916		 * Think of how you subtract 1 from 1000
2917		 */
2918		BUG_ON(count != COUNT_CONTINUED);
2919		while (*map == COUNT_CONTINUED) {
2920			kunmap_atomic(map);
2921			page = list_entry(page->lru.next, struct page, lru);
2922			BUG_ON(page == head);
2923			map = kmap_atomic(page) + offset;
2924		}
2925		BUG_ON(*map == 0);
2926		*map -= 1;
2927		if (*map == 0)
2928			count = 0;
2929		kunmap_atomic(map);
2930		page = list_entry(page->lru.prev, struct page, lru);
2931		while (page != head) {
2932			map = kmap_atomic(page) + offset;
2933			*map = SWAP_CONT_MAX | count;
2934			count = COUNT_CONTINUED;
2935			kunmap_atomic(map);
2936			page = list_entry(page->lru.prev, struct page, lru);
2937		}
2938		return count == COUNT_CONTINUED;
2939	}
2940}
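
/*
 * Editorial example, not part of the original file: the carry/borrow above
 * is plain positional arithmetic.  The original swap_map byte is the low
 * digit (it saturates at SWAP_MAP_MAX == 0x3e, i.e. 62) and each
 * continuation page supplies one higher-order digit of radix
 * SWAP_CONT_MAX + 1.  A page with 68 swap references is therefore stored as
 *
 *	swap_map[offset]        = 5 | COUNT_CONTINUED
 *	continuation page byte  = 1
 *
 * i.e. 1 * (SWAP_MAP_MAX + 1) + 5 = 68, just as "999 + 1" spills a carry
 * into the next decimal digit.
 */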
2941
2942/*
2943 * free_swap_count_continuations - swapoff free all the continuation pages
2944 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2945 */
2946static void free_swap_count_continuations(struct swap_info_struct *si)
2947{
2948	pgoff_t offset;
2949
2950	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2951		struct page *head;
2952		head = vmalloc_to_page(si->swap_map + offset);
2953		if (page_private(head)) {
2954			struct page *page, *next;
2955
2956			list_for_each_entry_safe(page, next, &head->lru, lru) {
2957				list_del(&page->lru);
2958				__free_page(page);
2959			}
2960		}
2961	}
2962}