   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/mm.h>
  10#include <linux/sched/mm.h>
  11#include <linux/sched/task.h>
  12#include <linux/hugetlb.h>
  13#include <linux/mman.h>
  14#include <linux/slab.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/swap.h>
  17#include <linux/vmalloc.h>
  18#include <linux/pagemap.h>
  19#include <linux/namei.h>
  20#include <linux/shmem_fs.h>
  21#include <linux/blkdev.h>
  22#include <linux/random.h>
  23#include <linux/writeback.h>
  24#include <linux/proc_fs.h>
  25#include <linux/seq_file.h>
  26#include <linux/init.h>
  27#include <linux/ksm.h>
  28#include <linux/rmap.h>
  29#include <linux/security.h>
  30#include <linux/backing-dev.h>
  31#include <linux/mutex.h>
  32#include <linux/capability.h>
  33#include <linux/syscalls.h>
  34#include <linux/memcontrol.h>
  35#include <linux/poll.h>
  36#include <linux/oom.h>
  37#include <linux/frontswap.h>
  38#include <linux/swapfile.h>
  39#include <linux/export.h>
  40#include <linux/swap_slots.h>
  41#include <linux/sort.h>
  42
  43#include <asm/pgtable.h>
  44#include <asm/tlbflush.h>
  45#include <linux/swapops.h>
  46#include <linux/swap_cgroup.h>
  47
  48static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  49				 unsigned char);
  50static void free_swap_count_continuations(struct swap_info_struct *);
  51static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  52
  53DEFINE_SPINLOCK(swap_lock);
  54static unsigned int nr_swapfiles;
  55atomic_long_t nr_swap_pages;
  56/*
  57 * Some modules use swappable objects and may try to swap them out under
  58 * memory pressure (via the shrinker). Before doing so, they may wish to
  59 * check to see if any swap space is available.
  60 */
  61EXPORT_SYMBOL_GPL(nr_swap_pages);
  62/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  63long total_swap_pages;
  64static int least_priority = -1;
  65
  66static const char Bad_file[] = "Bad swap file entry ";
  67static const char Unused_file[] = "Unused swap file entry ";
  68static const char Bad_offset[] = "Bad swap offset entry ";
  69static const char Unused_offset[] = "Unused swap offset entry ";
  70
  71/*
  72 * all active swap_info_structs
  73 * protected with swap_lock, and ordered by priority.
  74 */
  75PLIST_HEAD(swap_active_head);
  76
  77/*
  78 * all available (active, not full) swap_info_structs
  79 * protected with swap_avail_lock, ordered by priority.
  80 * This is used by get_swap_page() instead of swap_active_head
  81 * because swap_active_head includes all swap_info_structs,
  82 * but get_swap_page() doesn't need to look at full ones.
  83 * This uses its own lock instead of swap_lock because when a
  84 * swap_info_struct changes between not-full/full, it needs to
  85 * add/remove itself to/from this list, but the swap_info_struct->lock
  86 * is held and the locking order requires swap_lock to be taken
  87 * before any swap_info_struct->lock.
  88 */
  89static struct plist_head *swap_avail_heads;
  90static DEFINE_SPINLOCK(swap_avail_lock);
  91
  92struct swap_info_struct *swap_info[MAX_SWAPFILES];
  93
  94static DEFINE_MUTEX(swapon_mutex);
  95
  96static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  97/* Activity counter to indicate that a swapon or swapoff has occurred */
  98static atomic_t proc_poll_event = ATOMIC_INIT(0);
  99
 100atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 101
 102static struct swap_info_struct *swap_type_to_swap_info(int type)
 103{
 104	if (type >= READ_ONCE(nr_swapfiles))
 105		return NULL;
 106
 107	smp_rmb();	/* Pairs with smp_wmb in alloc_swap_info. */
 108	return READ_ONCE(swap_info[type]);
 109}
 110
 111static inline unsigned char swap_count(unsigned char ent)
 112{
 113	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
 114}
 115
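/*
 * A minimal sketch of how a swap_map byte decomposes; the values are
 * hypothetical, for illustration only:
 *
 *	unsigned char ent = SWAP_HAS_CACHE | 3;	// cached + 3 references
 *	swap_count(ent);			// == 3
 *	ent & SWAP_HAS_CACHE;			// != 0: swapcache ref held
 */
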
 116/* Reclaim the swap entry anyway if possible */
 117#define TTRS_ANYWAY		0x1
 118/*
 119 * Reclaim the swap entry if there are no more mappings of the
 120 * corresponding page
 121 */
 122#define TTRS_UNMAPPED		0x2
  123/* Reclaim the swap entry if swap is getting full */
 124#define TTRS_FULL		0x4
 125
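/*
 * Sketch of how the TTRS_* flags combine: a caller that wants the entry
 * reclaimed only when the page is unmapped or swap is nearly full passes
 * both bits, as free_swap_and_cache() does later in this file:
 *
 *	__try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL);
 */
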
 126/* returns 1 if swap entry is freed */
 127static int __try_to_reclaim_swap(struct swap_info_struct *si,
 128				 unsigned long offset, unsigned long flags)
 129{
 130	swp_entry_t entry = swp_entry(si->type, offset);
 131	struct page *page;
 132	int ret = 0;
 133
 134	page = find_get_page(swap_address_space(entry), offset);
 135	if (!page)
 136		return 0;
  137	/*
  138	 * This function can be called from scan_swap_map_slots(), which in
  139	 * turn is called by vmscan.c while reclaiming pages, so we may already
  140	 * hold a lock on a page here. We have to use trylock to avoid
  141	 * deadlock. This is a special case: in normal operation, use
  142	 * try_to_free_swap() with an explicit lock_page() instead.
  143	 */
 144	if (trylock_page(page)) {
 145		if ((flags & TTRS_ANYWAY) ||
 146		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
 147		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
 148			ret = try_to_free_swap(page);
 149		unlock_page(page);
 150	}
 151	put_page(page);
 152	return ret;
 153}
 154
 155static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 156{
 157	struct rb_node *rb = rb_first(&sis->swap_extent_root);
 158	return rb_entry(rb, struct swap_extent, rb_node);
 159}
 160
 161static inline struct swap_extent *next_se(struct swap_extent *se)
 162{
 163	struct rb_node *rb = rb_next(&se->rb_node);
 164	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 165}
 166
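/*
 * These two helpers give the usual extent walk; a sketch of visiting
 * every extent of a device:
 *
 *	struct swap_extent *se;
 *
 *	for (se = first_se(sis); se; se = next_se(se))
 *		... se->start_page, se->nr_pages, se->start_block ...
 */
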
 167/*
  168 * swapon tells the device that all the old swap contents can be discarded,
 169 * to allow the swap device to optimize its wear-levelling.
 170 */
 171static int discard_swap(struct swap_info_struct *si)
 172{
 173	struct swap_extent *se;
 174	sector_t start_block;
 175	sector_t nr_blocks;
 176	int err = 0;
 177
 178	/* Do not discard the swap header page! */
 179	se = first_se(si);
 180	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 181	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 182	if (nr_blocks) {
 183		err = blkdev_issue_discard(si->bdev, start_block,
 184				nr_blocks, GFP_KERNEL, 0);
 185		if (err)
 186			return err;
 187		cond_resched();
 188	}
 189
 190	for (se = next_se(se); se; se = next_se(se)) {
 191		start_block = se->start_block << (PAGE_SHIFT - 9);
 192		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 193
 194		err = blkdev_issue_discard(si->bdev, start_block,
 195				nr_blocks, GFP_KERNEL, 0);
 196		if (err)
 197			break;
 198
 199		cond_resched();
 200	}
 201	return err;		/* That will often be -EOPNOTSUPP */
 202}
 203
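/*
 * Worked example of the sector arithmetic above, assuming 4KiB pages:
 * PAGE_SHIFT - 9 == 12 - 9 == 3, so each page covers 1 << 3 == 8
 * 512-byte sectors, and an extent of nr_pages pages starting at
 * start_block spans (nr_pages << 3) sectors from (start_block << 3).
 */
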
 204static struct swap_extent *
 205offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 206{
 207	struct swap_extent *se;
 208	struct rb_node *rb;
 209
 210	rb = sis->swap_extent_root.rb_node;
 211	while (rb) {
 212		se = rb_entry(rb, struct swap_extent, rb_node);
 213		if (offset < se->start_page)
 214			rb = rb->rb_left;
 215		else if (offset >= se->start_page + se->nr_pages)
 216			rb = rb->rb_right;
 217		else
 218			return se;
 219	}
 220	/* It *must* be present */
 221	BUG();
 222}
 223
 224/*
  225 * swap allocation tells the device that a cluster of swap can now be discarded,
 226 * to allow the swap device to optimize its wear-levelling.
 227 */
 228static void discard_swap_cluster(struct swap_info_struct *si,
 229				 pgoff_t start_page, pgoff_t nr_pages)
 230{
 231	struct swap_extent *se = offset_to_swap_extent(si, start_page);
 232
 233	while (nr_pages) {
 234		pgoff_t offset = start_page - se->start_page;
 235		sector_t start_block = se->start_block + offset;
 236		sector_t nr_blocks = se->nr_pages - offset;
 237
 238		if (nr_blocks > nr_pages)
 239			nr_blocks = nr_pages;
 240		start_page += nr_blocks;
 241		nr_pages -= nr_blocks;
 242
 243		start_block <<= PAGE_SHIFT - 9;
 244		nr_blocks <<= PAGE_SHIFT - 9;
 245		if (blkdev_issue_discard(si->bdev, start_block,
 246					nr_blocks, GFP_NOIO, 0))
 247			break;
 248
 249		se = next_se(se);
 250	}
 251}
 252
 253#ifdef CONFIG_THP_SWAP
 254#define SWAPFILE_CLUSTER	HPAGE_PMD_NR
 255
 256#define swap_entry_size(size)	(size)
 257#else
 258#define SWAPFILE_CLUSTER	256
 259
 260/*
  261 * Define swap_entry_size() as a constant to let the compiler optimize
  262 * out some code if !CONFIG_THP_SWAP.
 263 */
 264#define swap_entry_size(size)	1
 265#endif
 266#define LATENCY_LIMIT		256
 267
 268static inline void cluster_set_flag(struct swap_cluster_info *info,
 269	unsigned int flag)
 270{
 271	info->flags = flag;
 272}
 273
 274static inline unsigned int cluster_count(struct swap_cluster_info *info)
 275{
 276	return info->data;
 277}
 278
 279static inline void cluster_set_count(struct swap_cluster_info *info,
 280				     unsigned int c)
 281{
 282	info->data = c;
 283}
 284
 285static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 286					 unsigned int c, unsigned int f)
 287{
 288	info->flags = f;
 289	info->data = c;
 290}
 291
 292static inline unsigned int cluster_next(struct swap_cluster_info *info)
 293{
 294	return info->data;
 295}
 296
 297static inline void cluster_set_next(struct swap_cluster_info *info,
 298				    unsigned int n)
 299{
 300	info->data = n;
 301}
 302
 303static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 304					 unsigned int n, unsigned int f)
 305{
 306	info->flags = f;
 307	info->data = n;
 308}
 309
 310static inline bool cluster_is_free(struct swap_cluster_info *info)
 311{
 312	return info->flags & CLUSTER_FLAG_FREE;
 313}
 314
 315static inline bool cluster_is_null(struct swap_cluster_info *info)
 316{
 317	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 318}
 319
 320static inline void cluster_set_null(struct swap_cluster_info *info)
 321{
 322	info->flags = CLUSTER_FLAG_NEXT_NULL;
 323	info->data = 0;
 324}
 325
 326static inline bool cluster_is_huge(struct swap_cluster_info *info)
 327{
 328	if (IS_ENABLED(CONFIG_THP_SWAP))
 329		return info->flags & CLUSTER_FLAG_HUGE;
 330	return false;
 331}
 332
 333static inline void cluster_clear_huge(struct swap_cluster_info *info)
 334{
 335	info->flags &= ~CLUSTER_FLAG_HUGE;
 336}
 337
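/*
 * Sketch of the encoding the accessors above hide: each
 * swap_cluster_info packs a flags field and a data field, where "data"
 * holds the usage count of an allocated cluster, or the index of the
 * next cluster when this one sits on a free/discard list.  E.g.,
 * hypothetically:
 *
 *	cluster_set_count_flag(ci, 0, CLUSTER_FLAG_FREE);
 *	cluster_is_free(ci);	// true
 *	cluster_count(ci);	// == 0
 */
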
 338static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 339						     unsigned long offset)
 340{
 341	struct swap_cluster_info *ci;
 342
 343	ci = si->cluster_info;
 344	if (ci) {
 345		ci += offset / SWAPFILE_CLUSTER;
 346		spin_lock(&ci->lock);
 347	}
 348	return ci;
 349}
 350
 351static inline void unlock_cluster(struct swap_cluster_info *ci)
 352{
 353	if (ci)
 354		spin_unlock(&ci->lock);
 355}
 356
 357/*
 358 * Determine the locking method in use for this device.  Return
 359 * swap_cluster_info if SSD-style cluster-based locking is in place.
 360 */
 361static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 362		struct swap_info_struct *si, unsigned long offset)
 363{
 364	struct swap_cluster_info *ci;
 365
 366	/* Try to use fine-grained SSD-style locking if available: */
 367	ci = lock_cluster(si, offset);
 368	/* Otherwise, fall back to traditional, coarse locking: */
 369	if (!ci)
 370		spin_lock(&si->lock);
 371
 372	return ci;
 373}
 374
 375static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 376					       struct swap_cluster_info *ci)
 377{
 378	if (ci)
 379		unlock_cluster(ci);
 380	else
 381		spin_unlock(&si->lock);
 382}
 383
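/*
 * The usual pairing for these helpers, as page_swapcount() and others
 * below use it:
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	... examine or update si->swap_map[offset] ...
 *	unlock_cluster_or_swap_info(si, ci);
 */
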
 384static inline bool cluster_list_empty(struct swap_cluster_list *list)
 385{
 386	return cluster_is_null(&list->head);
 387}
 388
 389static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 390{
 391	return cluster_next(&list->head);
 392}
 393
 394static void cluster_list_init(struct swap_cluster_list *list)
 395{
 396	cluster_set_null(&list->head);
 397	cluster_set_null(&list->tail);
 398}
 399
 400static void cluster_list_add_tail(struct swap_cluster_list *list,
 401				  struct swap_cluster_info *ci,
 402				  unsigned int idx)
 403{
 404	if (cluster_list_empty(list)) {
 405		cluster_set_next_flag(&list->head, idx, 0);
 406		cluster_set_next_flag(&list->tail, idx, 0);
 407	} else {
 408		struct swap_cluster_info *ci_tail;
 409		unsigned int tail = cluster_next(&list->tail);
 410
 411		/*
 412		 * Nested cluster lock, but both cluster locks are
  413		 * only acquired while we hold swap_info_struct->lock.
 414		 */
 415		ci_tail = ci + tail;
 416		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 417		cluster_set_next(ci_tail, idx);
 418		spin_unlock(&ci_tail->lock);
 419		cluster_set_next_flag(&list->tail, idx, 0);
 420	}
 421}
 422
 423static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 424					   struct swap_cluster_info *ci)
 425{
 426	unsigned int idx;
 427
 428	idx = cluster_next(&list->head);
 429	if (cluster_next(&list->tail) == idx) {
 430		cluster_set_null(&list->head);
 431		cluster_set_null(&list->tail);
 432	} else
 433		cluster_set_next_flag(&list->head,
 434				      cluster_next(&ci[idx]), 0);
 435
 436	return idx;
 437}
 438
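/*
 * Sketch of the list discipline built from the two operations above:
 * clusters are kept in FIFO order, with head and tail stored as indices
 * into si->cluster_info:
 *
 *	cluster_list_add_tail(&si->free_clusters, ci, idx);
 *	...
 *	idx = cluster_list_del_first(&si->free_clusters, ci);
 */
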
  439/* Add a cluster to the discard list and schedule the discard */
 440static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 441		unsigned int idx)
 442{
 443	/*
 444	 * If scan_swap_map() can't find a free cluster, it will check
  445	 * si->swap_map directly. To make sure the cluster being discarded isn't
  446	 * taken by scan_swap_map(), mark the swap entries bad (occupied). They
  447	 * will be cleared after the discard.
 448	 */
 449	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 450			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 451
 452	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 453
 454	schedule_work(&si->discard_work);
 455}
 456
 457static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 458{
 459	struct swap_cluster_info *ci = si->cluster_info;
 460
 461	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 462	cluster_list_add_tail(&si->free_clusters, ci, idx);
 463}
 464
 465/*
  466 * Actually do the discards. After a cluster discard finishes, the cluster
  467 * will be added to the free cluster list. The caller should hold si->lock.
  468 */
 469static void swap_do_scheduled_discard(struct swap_info_struct *si)
 470{
 471	struct swap_cluster_info *info, *ci;
 472	unsigned int idx;
 473
 474	info = si->cluster_info;
 475
 476	while (!cluster_list_empty(&si->discard_clusters)) {
 477		idx = cluster_list_del_first(&si->discard_clusters, info);
 478		spin_unlock(&si->lock);
 479
 480		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 481				SWAPFILE_CLUSTER);
 482
 483		spin_lock(&si->lock);
 484		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 485		__free_cluster(si, idx);
 486		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 487				0, SWAPFILE_CLUSTER);
 488		unlock_cluster(ci);
 489	}
 490}
 491
 492static void swap_discard_work(struct work_struct *work)
 493{
 494	struct swap_info_struct *si;
 495
 496	si = container_of(work, struct swap_info_struct, discard_work);
 497
 498	spin_lock(&si->lock);
 499	swap_do_scheduled_discard(si);
 500	spin_unlock(&si->lock);
 501}
 502
 503static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 504{
 505	struct swap_cluster_info *ci = si->cluster_info;
 506
 507	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 508	cluster_list_del_first(&si->free_clusters, ci);
 509	cluster_set_count_flag(ci + idx, 0, 0);
 510}
 511
 512static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 513{
 514	struct swap_cluster_info *ci = si->cluster_info + idx;
 515
 516	VM_BUG_ON(cluster_count(ci) != 0);
 517	/*
  518	 * If the swap is discardable, prepare to discard the cluster
  519	 * instead of freeing it immediately. The cluster will be freed
  520	 * after the discard.
 521	 */
 522	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 523	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 524		swap_cluster_schedule_discard(si, idx);
 525		return;
 526	}
 527
 528	__free_cluster(si, idx);
 529}
 530
 531/*
 532 * The cluster corresponding to page_nr will be used. The cluster will be
  533 * removed from the free cluster list and its usage counter will be increased.
 534 */
 535static void inc_cluster_info_page(struct swap_info_struct *p,
 536	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 537{
 538	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 539
 540	if (!cluster_info)
 541		return;
 542	if (cluster_is_free(&cluster_info[idx]))
 543		alloc_cluster(p, idx);
 544
 545	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 546	cluster_set_count(&cluster_info[idx],
 547		cluster_count(&cluster_info[idx]) + 1);
 548}
 549
 550/*
  551 * The cluster corresponding to page_nr loses one usage. If the usage
  552 * counter becomes 0, meaning no page in the cluster is in use, we can
  553 * optionally discard the cluster and add it to the free cluster list.
 554 */
 555static void dec_cluster_info_page(struct swap_info_struct *p,
 556	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 557{
 558	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 559
 560	if (!cluster_info)
 561		return;
 562
 563	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 564	cluster_set_count(&cluster_info[idx],
 565		cluster_count(&cluster_info[idx]) - 1);
 566
 567	if (cluster_count(&cluster_info[idx]) == 0)
 568		free_cluster(p, idx);
 569}
 570
 571/*
  572 * It's possible for scan_swap_map() to use a free cluster from the middle
  573 * of the free cluster list. Avoid such abuse to prevent list corruption.
 574 */
 575static bool
 576scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 577	unsigned long offset)
 578{
 579	struct percpu_cluster *percpu_cluster;
 580	bool conflict;
 581
 582	offset /= SWAPFILE_CLUSTER;
 583	conflict = !cluster_list_empty(&si->free_clusters) &&
 584		offset != cluster_list_first(&si->free_clusters) &&
 585		cluster_is_free(&si->cluster_info[offset]);
 586
 587	if (!conflict)
 588		return false;
 589
 590	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 591	cluster_set_null(&percpu_cluster->index);
 592	return true;
 593}
 594
 595/*
  596 * Try to get a swap entry from the current cpu's swap entry pool (a cluster).
  597 * This might involve allocating a new cluster for the current CPU too.
 598 */
 599static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 600	unsigned long *offset, unsigned long *scan_base)
 601{
 602	struct percpu_cluster *cluster;
 603	struct swap_cluster_info *ci;
 604	bool found_free;
 605	unsigned long tmp, max;
 606
 607new_cluster:
 608	cluster = this_cpu_ptr(si->percpu_cluster);
 609	if (cluster_is_null(&cluster->index)) {
 610		if (!cluster_list_empty(&si->free_clusters)) {
 611			cluster->index = si->free_clusters.head;
 612			cluster->next = cluster_next(&cluster->index) *
 613					SWAPFILE_CLUSTER;
 614		} else if (!cluster_list_empty(&si->discard_clusters)) {
 615			/*
  616			 * We have no free cluster, but some clusters are being
  617			 * discarded; do the discard now and reclaim them.
 618			 */
 619			swap_do_scheduled_discard(si);
 620			*scan_base = *offset = si->cluster_next;
 621			goto new_cluster;
 622		} else
 623			return false;
 624	}
 625
 626	found_free = false;
 627
 628	/*
  629	 * Other CPUs can use our cluster if they can't find a free one;
  630	 * check whether there is still a free entry in the cluster.
 631	 */
 632	tmp = cluster->next;
 633	max = min_t(unsigned long, si->max,
 634		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 635	if (tmp >= max) {
 636		cluster_set_null(&cluster->index);
 637		goto new_cluster;
 638	}
 639	ci = lock_cluster(si, tmp);
 640	while (tmp < max) {
 641		if (!si->swap_map[tmp]) {
 642			found_free = true;
 643			break;
 644		}
 645		tmp++;
 646	}
 647	unlock_cluster(ci);
 648	if (!found_free) {
 649		cluster_set_null(&cluster->index);
 650		goto new_cluster;
 651	}
 652	cluster->next = tmp + 1;
 653	*offset = tmp;
 654	*scan_base = tmp;
 655	return found_free;
 656}
 657
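/*
 * Caller pattern for the helper above, as scan_swap_map_slots() uses it
 * below: on an SSD the allocator first asks the per-cpu cluster for a
 * slot, and falls back to a linear scan only when that fails:
 *
 *	if (si->cluster_info) {
 *		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 *			goto checks;
 *		else
 *			goto scan;
 *	}
 */
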
 658static void __del_from_avail_list(struct swap_info_struct *p)
 659{
 660	int nid;
 661
 662	for_each_node(nid)
 663		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 664}
 665
 666static void del_from_avail_list(struct swap_info_struct *p)
 667{
 668	spin_lock(&swap_avail_lock);
 669	__del_from_avail_list(p);
 670	spin_unlock(&swap_avail_lock);
 671}
 672
 673static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 674			     unsigned int nr_entries)
 675{
 676	unsigned int end = offset + nr_entries - 1;
 677
 678	if (offset == si->lowest_bit)
 679		si->lowest_bit += nr_entries;
 680	if (end == si->highest_bit)
 681		si->highest_bit -= nr_entries;
 682	si->inuse_pages += nr_entries;
 683	if (si->inuse_pages == si->pages) {
 684		si->lowest_bit = si->max;
 685		si->highest_bit = 0;
 686		del_from_avail_list(si);
 687	}
 688}
 689
 690static void add_to_avail_list(struct swap_info_struct *p)
 691{
 692	int nid;
 693
 694	spin_lock(&swap_avail_lock);
 695	for_each_node(nid) {
 696		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
 697		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 698	}
 699	spin_unlock(&swap_avail_lock);
 700}
 701
 702static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 703			    unsigned int nr_entries)
 704{
 705	unsigned long end = offset + nr_entries - 1;
 706	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 707
 708	if (offset < si->lowest_bit)
 709		si->lowest_bit = offset;
 710	if (end > si->highest_bit) {
 711		bool was_full = !si->highest_bit;
 712
 713		si->highest_bit = end;
 714		if (was_full && (si->flags & SWP_WRITEOK))
 715			add_to_avail_list(si);
 716	}
 717	atomic_long_add(nr_entries, &nr_swap_pages);
 718	si->inuse_pages -= nr_entries;
 719	if (si->flags & SWP_BLKDEV)
 720		swap_slot_free_notify =
 721			si->bdev->bd_disk->fops->swap_slot_free_notify;
 722	else
 723		swap_slot_free_notify = NULL;
 724	while (offset <= end) {
 725		frontswap_invalidate_page(si->type, offset);
 726		if (swap_slot_free_notify)
 727			swap_slot_free_notify(si->bdev, offset);
 728		offset++;
 729	}
 730}
 731
 732static int scan_swap_map_slots(struct swap_info_struct *si,
 733			       unsigned char usage, int nr,
 734			       swp_entry_t slots[])
 735{
 736	struct swap_cluster_info *ci;
 737	unsigned long offset;
 738	unsigned long scan_base;
 739	unsigned long last_in_cluster = 0;
 740	int latency_ration = LATENCY_LIMIT;
 741	int n_ret = 0;
 742
 743	if (nr > SWAP_BATCH)
 744		nr = SWAP_BATCH;
 745
 746	/*
 747	 * We try to cluster swap pages by allocating them sequentially
 748	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 749	 * way, however, we resort to first-free allocation, starting
 750	 * a new cluster.  This prevents us from scattering swap pages
 751	 * all over the entire swap partition, so that we reduce
 752	 * overall disk seek times between swap pages.  -- sct
 753	 * But we do now try to find an empty cluster.  -Andrea
 754	 * And we let swap pages go all over an SSD partition.  Hugh
 755	 */
 756
 757	si->flags += SWP_SCANNING;
 758	scan_base = offset = si->cluster_next;
 759
 760	/* SSD algorithm */
 761	if (si->cluster_info) {
 762		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 763			goto checks;
 764		else
 765			goto scan;
 766	}
 767
 768	if (unlikely(!si->cluster_nr--)) {
 769		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 770			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 771			goto checks;
 772		}
 773
 774		spin_unlock(&si->lock);
 775
 776		/*
 777		 * If seek is expensive, start searching for new cluster from
 778		 * start of partition, to minimize the span of allocated swap.
 779		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 780		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 781		 */
 782		scan_base = offset = si->lowest_bit;
 783		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 784
 785		/* Locate the first empty (unaligned) cluster */
 786		for (; last_in_cluster <= si->highest_bit; offset++) {
 787			if (si->swap_map[offset])
 788				last_in_cluster = offset + SWAPFILE_CLUSTER;
 789			else if (offset == last_in_cluster) {
 790				spin_lock(&si->lock);
 791				offset -= SWAPFILE_CLUSTER - 1;
 792				si->cluster_next = offset;
 793				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 794				goto checks;
 795			}
 796			if (unlikely(--latency_ration < 0)) {
 797				cond_resched();
 798				latency_ration = LATENCY_LIMIT;
 799			}
 800		}
 801
 802		offset = scan_base;
 803		spin_lock(&si->lock);
 804		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 805	}
 806
 807checks:
 808	if (si->cluster_info) {
 809		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
 810		/* take a break if we already got some slots */
 811			if (n_ret)
 812				goto done;
 813			if (!scan_swap_map_try_ssd_cluster(si, &offset,
 814							&scan_base))
 815				goto scan;
 816		}
 817	}
 818	if (!(si->flags & SWP_WRITEOK))
 819		goto no_page;
 820	if (!si->highest_bit)
 821		goto no_page;
 822	if (offset > si->highest_bit)
 823		scan_base = offset = si->lowest_bit;
 824
 825	ci = lock_cluster(si, offset);
 826	/* reuse swap entry of cache-only swap if not busy. */
 827	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 828		int swap_was_freed;
 829		unlock_cluster(ci);
 830		spin_unlock(&si->lock);
 831		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 832		spin_lock(&si->lock);
 833		/* entry was freed successfully, try to use this again */
 834		if (swap_was_freed)
 835			goto checks;
 836		goto scan; /* check next one */
 837	}
 838
 839	if (si->swap_map[offset]) {
 840		unlock_cluster(ci);
 841		if (!n_ret)
 842			goto scan;
 843		else
 844			goto done;
 845	}
 846	si->swap_map[offset] = usage;
 847	inc_cluster_info_page(si, si->cluster_info, offset);
 848	unlock_cluster(ci);
 849
 850	swap_range_alloc(si, offset, 1);
 851	si->cluster_next = offset + 1;
 852	slots[n_ret++] = swp_entry(si->type, offset);
 853
  854	/* got enough slots or reached max slots? */
 855	if ((n_ret == nr) || (offset >= si->highest_bit))
 856		goto done;
 857
 858	/* search for next available slot */
 859
 860	/* time to take a break? */
 861	if (unlikely(--latency_ration < 0)) {
 862		if (n_ret)
 863			goto done;
 864		spin_unlock(&si->lock);
 865		cond_resched();
 866		spin_lock(&si->lock);
 867		latency_ration = LATENCY_LIMIT;
 868	}
 869
 870	/* try to get more slots in cluster */
 871	if (si->cluster_info) {
 872		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 873			goto checks;
 874		else
 875			goto done;
 876	}
 877	/* non-ssd case */
 878	++offset;
 879
 880	/* non-ssd case, still more slots in cluster? */
 881	if (si->cluster_nr && !si->swap_map[offset]) {
 882		--si->cluster_nr;
 883		goto checks;
 884	}
 885
 886done:
 887	si->flags -= SWP_SCANNING;
 888	return n_ret;
 889
 890scan:
 891	spin_unlock(&si->lock);
 892	while (++offset <= si->highest_bit) {
 893		if (!si->swap_map[offset]) {
 894			spin_lock(&si->lock);
 895			goto checks;
 896		}
 897		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 898			spin_lock(&si->lock);
 899			goto checks;
 900		}
 901		if (unlikely(--latency_ration < 0)) {
 902			cond_resched();
 903			latency_ration = LATENCY_LIMIT;
 904		}
 905	}
 906	offset = si->lowest_bit;
 907	while (offset < scan_base) {
 908		if (!si->swap_map[offset]) {
 909			spin_lock(&si->lock);
 910			goto checks;
 911		}
 912		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 913			spin_lock(&si->lock);
 914			goto checks;
 915		}
 916		if (unlikely(--latency_ration < 0)) {
 917			cond_resched();
 918			latency_ration = LATENCY_LIMIT;
 919		}
 920		offset++;
 921	}
 922	spin_lock(&si->lock);
 923
 924no_page:
 925	si->flags -= SWP_SCANNING;
 926	return n_ret;
 927}
 928
 929static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 930{
 931	unsigned long idx;
 932	struct swap_cluster_info *ci;
 933	unsigned long offset, i;
 934	unsigned char *map;
 935
 936	/*
 937	 * Should not even be attempting cluster allocations when huge
 938	 * page swap is disabled.  Warn and fail the allocation.
 939	 */
 940	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
 941		VM_WARN_ON_ONCE(1);
 942		return 0;
 943	}
 944
 945	if (cluster_list_empty(&si->free_clusters))
 946		return 0;
 947
 948	idx = cluster_list_first(&si->free_clusters);
 949	offset = idx * SWAPFILE_CLUSTER;
 950	ci = lock_cluster(si, offset);
 951	alloc_cluster(si, idx);
 952	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
 953
 954	map = si->swap_map + offset;
 955	for (i = 0; i < SWAPFILE_CLUSTER; i++)
 956		map[i] = SWAP_HAS_CACHE;
 957	unlock_cluster(ci);
 958	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
 959	*slot = swp_entry(si->type, offset);
 960
 961	return 1;
 962}
 963
 964static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 965{
 966	unsigned long offset = idx * SWAPFILE_CLUSTER;
 967	struct swap_cluster_info *ci;
 968
 969	ci = lock_cluster(si, offset);
 970	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
 971	cluster_set_count_flag(ci, 0, 0);
 972	free_cluster(si, idx);
 973	unlock_cluster(ci);
 974	swap_range_free(si, offset, SWAPFILE_CLUSTER);
 975}
 976
 977static unsigned long scan_swap_map(struct swap_info_struct *si,
 978				   unsigned char usage)
 979{
 980	swp_entry_t entry;
 981	int n_ret;
 982
 983	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
 984
 985	if (n_ret)
 986		return swp_offset(entry);
 987	else
 988		return 0;
 989
 990}
 991
 992int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 993{
 994	unsigned long size = swap_entry_size(entry_size);
 995	struct swap_info_struct *si, *next;
 996	long avail_pgs;
 997	int n_ret = 0;
 998	int node;
 999
1000	/* Only single cluster request supported */
1001	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1002
1003	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1004	if (avail_pgs <= 0)
1005		goto noswap;
1006
1007	if (n_goal > SWAP_BATCH)
1008		n_goal = SWAP_BATCH;
1009
1010	if (n_goal > avail_pgs)
1011		n_goal = avail_pgs;
1012
1013	atomic_long_sub(n_goal * size, &nr_swap_pages);
1014
1015	spin_lock(&swap_avail_lock);
1016
1017start_over:
1018	node = numa_node_id();
1019	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1020		/* requeue si to after same-priority siblings */
1021		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1022		spin_unlock(&swap_avail_lock);
1023		spin_lock(&si->lock);
1024		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1025			spin_lock(&swap_avail_lock);
1026			if (plist_node_empty(&si->avail_lists[node])) {
1027				spin_unlock(&si->lock);
1028				goto nextsi;
1029			}
1030			WARN(!si->highest_bit,
1031			     "swap_info %d in list but !highest_bit\n",
1032			     si->type);
1033			WARN(!(si->flags & SWP_WRITEOK),
1034			     "swap_info %d in list but !SWP_WRITEOK\n",
1035			     si->type);
1036			__del_from_avail_list(si);
1037			spin_unlock(&si->lock);
1038			goto nextsi;
1039		}
1040		if (size == SWAPFILE_CLUSTER) {
1041			if (!(si->flags & SWP_FS))
1042				n_ret = swap_alloc_cluster(si, swp_entries);
1043		} else
1044			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1045						    n_goal, swp_entries);
1046		spin_unlock(&si->lock);
1047		if (n_ret || size == SWAPFILE_CLUSTER)
1048			goto check_out;
1049		pr_debug("scan_swap_map of si %d failed to find offset\n",
1050			si->type);
1051
1052		spin_lock(&swap_avail_lock);
1053nextsi:
1054		/*
1055		 * if we got here, it's likely that si was almost full before,
1056		 * and since scan_swap_map() can drop the si->lock, multiple
1057		 * callers probably all tried to get a page from the same si
1058		 * and it filled up before we could get one; or, the si filled
1059		 * up between us dropping swap_avail_lock and taking si->lock.
1060		 * Since we dropped the swap_avail_lock, the swap_avail_head
1061		 * list may have been modified; so if next is still in the
1062		 * swap_avail_head list then try it, otherwise start over
1063		 * if we have not gotten any slots.
1064		 */
1065		if (plist_node_empty(&next->avail_lists[node]))
1066			goto start_over;
1067	}
1068
1069	spin_unlock(&swap_avail_lock);
1070
1071check_out:
1072	if (n_ret < n_goal)
1073		atomic_long_add((long)(n_goal - n_ret) * size,
1074				&nr_swap_pages);
1075noswap:
1076	return n_ret;
1077}
1078
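/*
 * Usage sketch (hypothetical values): callers request up to SWAP_BATCH
 * page-sized entries at once and get back however many were allocated:
 *
 *	swp_entry_t entries[SWAP_BATCH];
 *	int got = get_swap_pages(16, entries, 1);
 *	// got <= 16; entries[0..got-1] now hold valid swap entries
 */
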
 1079/* The only caller of this function is now the suspend routine */
1080swp_entry_t get_swap_page_of_type(int type)
1081{
1082	struct swap_info_struct *si = swap_type_to_swap_info(type);
1083	pgoff_t offset;
1084
1085	if (!si)
1086		goto fail;
1087
1088	spin_lock(&si->lock);
1089	if (si->flags & SWP_WRITEOK) {
1090		atomic_long_dec(&nr_swap_pages);
1091		/* This is called for allocating swap entry, not cache */
1092		offset = scan_swap_map(si, 1);
1093		if (offset) {
1094			spin_unlock(&si->lock);
1095			return swp_entry(type, offset);
1096		}
1097		atomic_long_inc(&nr_swap_pages);
1098	}
1099	spin_unlock(&si->lock);
1100fail:
1101	return (swp_entry_t) {0};
1102}
1103
1104static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1105{
1106	struct swap_info_struct *p;
1107	unsigned long offset;
1108
1109	if (!entry.val)
1110		goto out;
1111	p = swp_swap_info(entry);
1112	if (!p)
1113		goto bad_nofile;
1114	if (!(p->flags & SWP_USED))
1115		goto bad_device;
1116	offset = swp_offset(entry);
1117	if (offset >= p->max)
1118		goto bad_offset;
1119	return p;
1120
1121bad_offset:
1122	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
1123	goto out;
1124bad_device:
1125	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
1126	goto out;
1127bad_nofile:
1128	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
1129out:
1130	return NULL;
1131}
1132
1133static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1134{
1135	struct swap_info_struct *p;
1136
1137	p = __swap_info_get(entry);
1138	if (!p)
1139		goto out;
1140	if (!p->swap_map[swp_offset(entry)])
1141		goto bad_free;
1142	return p;
1143
1144bad_free:
1145	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
1146	goto out;
1147out:
1148	return NULL;
1149}
1150
1151static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1152{
1153	struct swap_info_struct *p;
1154
1155	p = _swap_info_get(entry);
1156	if (p)
1157		spin_lock(&p->lock);
1158	return p;
1159}
1160
1161static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1162					struct swap_info_struct *q)
1163{
1164	struct swap_info_struct *p;
1165
1166	p = _swap_info_get(entry);
1167
1168	if (p != q) {
1169		if (q != NULL)
1170			spin_unlock(&q->lock);
1171		if (p != NULL)
1172			spin_lock(&p->lock);
1173	}
1174	return p;
1175}
1176
1177static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1178					      unsigned long offset,
1179					      unsigned char usage)
1180{
1181	unsigned char count;
1182	unsigned char has_cache;
1183
1184	count = p->swap_map[offset];
1185
1186	has_cache = count & SWAP_HAS_CACHE;
1187	count &= ~SWAP_HAS_CACHE;
1188
1189	if (usage == SWAP_HAS_CACHE) {
1190		VM_BUG_ON(!has_cache);
1191		has_cache = 0;
1192	} else if (count == SWAP_MAP_SHMEM) {
1193		/*
1194		 * Or we could insist on shmem.c using a special
1195		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1196		 */
1197		count = 0;
1198	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1199		if (count == COUNT_CONTINUED) {
1200			if (swap_count_continued(p, offset, count))
1201				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1202			else
1203				count = SWAP_MAP_MAX;
1204		} else
1205			count--;
1206	}
1207
1208	usage = count | has_cache;
1209	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
1210
1211	return usage;
1212}
1213
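/*
 * Worked example for __swap_entry_free_locked(), with hypothetical
 * values: if swap_map[offset] == (SWAP_HAS_CACHE | 2), a call with
 * usage == 1 drops it to (SWAP_HAS_CACHE | 1) and returns that; a
 * second call returns SWAP_HAS_CACHE, telling the caller that only the
 * swapcache reference remains.
 */
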
1214/*
 1215 * Check whether the swap entry is valid in the swap device.  If so,
 1216 * return a pointer to the swap_info_struct, and keep the swap entry
 1217 * valid by preventing the swap device from being swapped off, until
 1218 * put_swap_device() is called.  Otherwise return NULL.
1219 *
1220 * The entirety of the RCU read critical section must come before the
1221 * return from or after the call to synchronize_rcu() in
1222 * enable_swap_info() or swapoff().  So if "si->flags & SWP_VALID" is
1223 * true, the si->map, si->cluster_info, etc. must be valid in the
1224 * critical section.
1225 *
1226 * Notice that swapoff or swapoff+swapon can still happen before the
1227 * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
1228 * in put_swap_device() if there isn't any other way to prevent
1229 * swapoff, such as page lock, page table lock, etc.  The caller must
1230 * be prepared for that.  For example, the following situation is
1231 * possible.
1232 *
1233 *   CPU1				CPU2
1234 *   do_swap_page()
1235 *     ...				swapoff+swapon
1236 *     __read_swap_cache_async()
1237 *       swapcache_prepare()
1238 *         __swap_duplicate()
1239 *           // check swap_map
1240 *     // verify PTE not changed
1241 *
 1242 * In __swap_duplicate(), the swap_map needs to be checked before being
 1243 * changed, partly because the specified swap entry may be for another
 1244 * swap device which has been swapped off.  And in do_swap_page(), after
 1245 * the page is read from the swap device, the PTE is verified, with the
 1246 * page table locked, not to have changed, to check whether the swap
 1247 * device has been swapped off or swapped off and on again.
1248 */
1249struct swap_info_struct *get_swap_device(swp_entry_t entry)
1250{
1251	struct swap_info_struct *si;
1252	unsigned long offset;
1253
1254	if (!entry.val)
1255		goto out;
1256	si = swp_swap_info(entry);
1257	if (!si)
1258		goto bad_nofile;
1259
1260	rcu_read_lock();
1261	if (!(si->flags & SWP_VALID))
1262		goto unlock_out;
1263	offset = swp_offset(entry);
1264	if (offset >= si->max)
1265		goto unlock_out;
1266
1267	return si;
1268bad_nofile:
1269	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1270out:
1271	return NULL;
1272unlock_out:
1273	rcu_read_unlock();
1274	return NULL;
1275}
1276
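/*
 * The usual pairing for the above, as __swap_count() below shows:
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		... safely dereference si->swap_map ...
 *		put_swap_device(si);
 *	}
 */
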
1277static unsigned char __swap_entry_free(struct swap_info_struct *p,
1278				       swp_entry_t entry, unsigned char usage)
1279{
1280	struct swap_cluster_info *ci;
1281	unsigned long offset = swp_offset(entry);
1282
1283	ci = lock_cluster_or_swap_info(p, offset);
1284	usage = __swap_entry_free_locked(p, offset, usage);
1285	unlock_cluster_or_swap_info(p, ci);
1286	if (!usage)
1287		free_swap_slot(entry);
1288
1289	return usage;
1290}
1291
1292static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1293{
1294	struct swap_cluster_info *ci;
1295	unsigned long offset = swp_offset(entry);
1296	unsigned char count;
1297
1298	ci = lock_cluster(p, offset);
1299	count = p->swap_map[offset];
1300	VM_BUG_ON(count != SWAP_HAS_CACHE);
1301	p->swap_map[offset] = 0;
1302	dec_cluster_info_page(p, p->cluster_info, offset);
1303	unlock_cluster(ci);
1304
1305	mem_cgroup_uncharge_swap(entry, 1);
1306	swap_range_free(p, offset, 1);
1307}
1308
1309/*
1310 * Caller has made sure that the swap device corresponding to entry
1311 * is still around or has not been recycled.
1312 */
1313void swap_free(swp_entry_t entry)
1314{
1315	struct swap_info_struct *p;
1316
1317	p = _swap_info_get(entry);
1318	if (p)
1319		__swap_entry_free(p, entry, 1);
1320}
1321
1322/*
 1323 * Called after dropping swapcache to decrease the refcount of swap entries.
1324 */
1325void put_swap_page(struct page *page, swp_entry_t entry)
1326{
1327	unsigned long offset = swp_offset(entry);
1328	unsigned long idx = offset / SWAPFILE_CLUSTER;
1329	struct swap_cluster_info *ci;
1330	struct swap_info_struct *si;
1331	unsigned char *map;
1332	unsigned int i, free_entries = 0;
1333	unsigned char val;
1334	int size = swap_entry_size(hpage_nr_pages(page));
1335
1336	si = _swap_info_get(entry);
1337	if (!si)
1338		return;
1339
1340	ci = lock_cluster_or_swap_info(si, offset);
1341	if (size == SWAPFILE_CLUSTER) {
1342		VM_BUG_ON(!cluster_is_huge(ci));
1343		map = si->swap_map + offset;
1344		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1345			val = map[i];
1346			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1347			if (val == SWAP_HAS_CACHE)
1348				free_entries++;
1349		}
1350		cluster_clear_huge(ci);
1351		if (free_entries == SWAPFILE_CLUSTER) {
1352			unlock_cluster_or_swap_info(si, ci);
1353			spin_lock(&si->lock);
1354			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1355			swap_free_cluster(si, idx);
1356			spin_unlock(&si->lock);
1357			return;
1358		}
1359	}
1360	for (i = 0; i < size; i++, entry.val++) {
1361		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1362			unlock_cluster_or_swap_info(si, ci);
1363			free_swap_slot(entry);
1364			if (i == size - 1)
1365				return;
1366			lock_cluster_or_swap_info(si, offset);
1367		}
1368	}
1369	unlock_cluster_or_swap_info(si, ci);
1370}
1371
1372#ifdef CONFIG_THP_SWAP
1373int split_swap_cluster(swp_entry_t entry)
1374{
1375	struct swap_info_struct *si;
1376	struct swap_cluster_info *ci;
1377	unsigned long offset = swp_offset(entry);
1378
1379	si = _swap_info_get(entry);
1380	if (!si)
1381		return -EBUSY;
1382	ci = lock_cluster(si, offset);
1383	cluster_clear_huge(ci);
1384	unlock_cluster(ci);
1385	return 0;
1386}
1387#endif
1388
1389static int swp_entry_cmp(const void *ent1, const void *ent2)
1390{
1391	const swp_entry_t *e1 = ent1, *e2 = ent2;
1392
1393	return (int)swp_type(*e1) - (int)swp_type(*e2);
1394}
1395
1396void swapcache_free_entries(swp_entry_t *entries, int n)
1397{
1398	struct swap_info_struct *p, *prev;
1399	int i;
1400
1401	if (n <= 0)
1402		return;
1403
1404	prev = NULL;
1405	p = NULL;
1406
1407	/*
1408	 * Sort swap entries by swap device, so each lock is only taken once.
1409	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1410	 * so low that it isn't necessary to optimize further.
1411	 */
1412	if (nr_swapfiles > 1)
1413		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1414	for (i = 0; i < n; ++i) {
1415		p = swap_info_get_cont(entries[i], prev);
1416		if (p)
1417			swap_entry_free(p, entries[i]);
1418		prev = p;
1419	}
1420	if (p)
1421		spin_unlock(&p->lock);
1422}
1423
1424/*
1425 * How many references to page are currently swapped out?
1426 * This does not give an exact answer when swap count is continued,
1427 * but does include the high COUNT_CONTINUED flag to allow for that.
1428 */
1429int page_swapcount(struct page *page)
1430{
1431	int count = 0;
1432	struct swap_info_struct *p;
1433	struct swap_cluster_info *ci;
1434	swp_entry_t entry;
1435	unsigned long offset;
1436
1437	entry.val = page_private(page);
1438	p = _swap_info_get(entry);
1439	if (p) {
1440		offset = swp_offset(entry);
1441		ci = lock_cluster_or_swap_info(p, offset);
1442		count = swap_count(p->swap_map[offset]);
1443		unlock_cluster_or_swap_info(p, ci);
1444	}
1445	return count;
1446}
1447
1448int __swap_count(swp_entry_t entry)
1449{
1450	struct swap_info_struct *si;
1451	pgoff_t offset = swp_offset(entry);
1452	int count = 0;
1453
1454	si = get_swap_device(entry);
1455	if (si) {
1456		count = swap_count(si->swap_map[offset]);
1457		put_swap_device(si);
1458	}
1459	return count;
1460}
1461
1462static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1463{
1464	int count = 0;
1465	pgoff_t offset = swp_offset(entry);
1466	struct swap_cluster_info *ci;
1467
1468	ci = lock_cluster_or_swap_info(si, offset);
1469	count = swap_count(si->swap_map[offset]);
1470	unlock_cluster_or_swap_info(si, ci);
1471	return count;
1472}
1473
1474/*
1475 * How many references to @entry are currently swapped out?
1476 * This does not give an exact answer when swap count is continued,
1477 * but does include the high COUNT_CONTINUED flag to allow for that.
1478 */
1479int __swp_swapcount(swp_entry_t entry)
1480{
1481	int count = 0;
1482	struct swap_info_struct *si;
1483
1484	si = get_swap_device(entry);
1485	if (si) {
1486		count = swap_swapcount(si, entry);
1487		put_swap_device(si);
1488	}
1489	return count;
1490}
1491
1492/*
1493 * How many references to @entry are currently swapped out?
 1494 * This considers COUNT_CONTINUED so it returns an exact answer.
1495 */
1496int swp_swapcount(swp_entry_t entry)
1497{
1498	int count, tmp_count, n;
1499	struct swap_info_struct *p;
1500	struct swap_cluster_info *ci;
1501	struct page *page;
1502	pgoff_t offset;
1503	unsigned char *map;
1504
1505	p = _swap_info_get(entry);
1506	if (!p)
1507		return 0;
1508
1509	offset = swp_offset(entry);
1510
1511	ci = lock_cluster_or_swap_info(p, offset);
1512
1513	count = swap_count(p->swap_map[offset]);
1514	if (!(count & COUNT_CONTINUED))
1515		goto out;
1516
1517	count &= ~COUNT_CONTINUED;
1518	n = SWAP_MAP_MAX + 1;
1519
1520	page = vmalloc_to_page(p->swap_map + offset);
1521	offset &= ~PAGE_MASK;
1522	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1523
1524	do {
1525		page = list_next_entry(page, lru);
1526		map = kmap_atomic(page);
1527		tmp_count = map[offset];
1528		kunmap_atomic(map);
1529
1530		count += (tmp_count & ~COUNT_CONTINUED) * n;
1531		n *= (SWAP_CONT_MAX + 1);
1532	} while (tmp_count & COUNT_CONTINUED);
1533out:
1534	unlock_cluster_or_swap_info(p, ci);
1535	return count;
1536}
1537
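/*
 * Worked example of the continuation arithmetic above, assuming the
 * flag values from linux/swap.h (SWAP_MAP_MAX == 0x3e, COUNT_CONTINUED
 * == 0x80): a map byte of (0x12 | COUNT_CONTINUED) plus one
 * continuation byte of 0x05 yields 0x12 + 0x05 * (SWAP_MAP_MAX + 1)
 * references in total.
 */
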
1538static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1539					 swp_entry_t entry)
1540{
1541	struct swap_cluster_info *ci;
1542	unsigned char *map = si->swap_map;
1543	unsigned long roffset = swp_offset(entry);
1544	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1545	int i;
1546	bool ret = false;
1547
1548	ci = lock_cluster_or_swap_info(si, offset);
1549	if (!ci || !cluster_is_huge(ci)) {
1550		if (swap_count(map[roffset]))
1551			ret = true;
1552		goto unlock_out;
1553	}
1554	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1555		if (swap_count(map[offset + i])) {
1556			ret = true;
1557			break;
1558		}
1559	}
1560unlock_out:
1561	unlock_cluster_or_swap_info(si, ci);
1562	return ret;
1563}
1564
1565static bool page_swapped(struct page *page)
1566{
1567	swp_entry_t entry;
1568	struct swap_info_struct *si;
1569
1570	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1571		return page_swapcount(page) != 0;
1572
1573	page = compound_head(page);
1574	entry.val = page_private(page);
1575	si = _swap_info_get(entry);
1576	if (si)
1577		return swap_page_trans_huge_swapped(si, entry);
1578	return false;
1579}
1580
1581static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1582					 int *total_swapcount)
1583{
1584	int i, map_swapcount, _total_mapcount, _total_swapcount;
1585	unsigned long offset = 0;
1586	struct swap_info_struct *si;
1587	struct swap_cluster_info *ci = NULL;
1588	unsigned char *map = NULL;
1589	int mapcount, swapcount = 0;
1590
1591	/* hugetlbfs shouldn't call it */
1592	VM_BUG_ON_PAGE(PageHuge(page), page);
1593
1594	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1595		mapcount = page_trans_huge_mapcount(page, total_mapcount);
1596		if (PageSwapCache(page))
1597			swapcount = page_swapcount(page);
1598		if (total_swapcount)
1599			*total_swapcount = swapcount;
1600		return mapcount + swapcount;
1601	}
1602
1603	page = compound_head(page);
1604
1605	_total_mapcount = _total_swapcount = map_swapcount = 0;
1606	if (PageSwapCache(page)) {
1607		swp_entry_t entry;
1608
1609		entry.val = page_private(page);
1610		si = _swap_info_get(entry);
1611		if (si) {
1612			map = si->swap_map;
1613			offset = swp_offset(entry);
1614		}
1615	}
1616	if (map)
1617		ci = lock_cluster(si, offset);
1618	for (i = 0; i < HPAGE_PMD_NR; i++) {
1619		mapcount = atomic_read(&page[i]._mapcount) + 1;
1620		_total_mapcount += mapcount;
1621		if (map) {
1622			swapcount = swap_count(map[offset + i]);
1623			_total_swapcount += swapcount;
1624		}
1625		map_swapcount = max(map_swapcount, mapcount + swapcount);
1626	}
1627	unlock_cluster(ci);
1628	if (PageDoubleMap(page)) {
1629		map_swapcount -= 1;
1630		_total_mapcount -= HPAGE_PMD_NR;
1631	}
1632	mapcount = compound_mapcount(page);
1633	map_swapcount += mapcount;
1634	_total_mapcount += mapcount;
1635	if (total_mapcount)
1636		*total_mapcount = _total_mapcount;
1637	if (total_swapcount)
1638		*total_swapcount = _total_swapcount;
1639
1640	return map_swapcount;
1641}
1642
1643/*
1644 * We can write to an anon page without COW if there are no other references
1645 * to it.  And as a side-effect, free up its swap: because the old content
1646 * on disk will never be read, and seeking back there to write new content
1647 * later would only waste time away from clustering.
1648 *
1649 * NOTE: total_map_swapcount should not be relied upon by the caller if
 1650 * reuse_swap_page() returns false, though it may always be overwritten
1651 * (see the other implementation for CONFIG_SWAP=n).
1652 */
1653bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1654{
1655	int count, total_mapcount, total_swapcount;
1656
1657	VM_BUG_ON_PAGE(!PageLocked(page), page);
1658	if (unlikely(PageKsm(page)))
1659		return false;
1660	count = page_trans_huge_map_swapcount(page, &total_mapcount,
1661					      &total_swapcount);
1662	if (total_map_swapcount)
1663		*total_map_swapcount = total_mapcount + total_swapcount;
1664	if (count == 1 && PageSwapCache(page) &&
1665	    (likely(!PageTransCompound(page)) ||
1666	     /* The remaining swap count will be freed soon */
1667	     total_swapcount == page_swapcount(page))) {
1668		if (!PageWriteback(page)) {
1669			page = compound_head(page);
1670			delete_from_swap_cache(page);
1671			SetPageDirty(page);
1672		} else {
1673			swp_entry_t entry;
1674			struct swap_info_struct *p;
1675
1676			entry.val = page_private(page);
1677			p = swap_info_get(entry);
1678			if (p->flags & SWP_STABLE_WRITES) {
1679				spin_unlock(&p->lock);
1680				return false;
1681			}
1682			spin_unlock(&p->lock);
1683		}
1684	}
1685
1686	return count <= 1;
1687}
1688
1689/*
1690 * If swap is getting full, or if there are no more mappings of this page,
1691 * then try_to_free_swap is called to free its swap space.
1692 */
1693int try_to_free_swap(struct page *page)
1694{
1695	VM_BUG_ON_PAGE(!PageLocked(page), page);
1696
1697	if (!PageSwapCache(page))
1698		return 0;
1699	if (PageWriteback(page))
1700		return 0;
1701	if (page_swapped(page))
1702		return 0;
1703
1704	/*
1705	 * Once hibernation has begun to create its image of memory,
1706	 * there's a danger that one of the calls to try_to_free_swap()
1707	 * - most probably a call from __try_to_reclaim_swap() while
1708	 * hibernation is allocating its own swap pages for the image,
1709	 * but conceivably even a call from memory reclaim - will free
1710	 * the swap from a page which has already been recorded in the
1711	 * image as a clean swapcache page, and then reuse its swap for
1712	 * another page of the image.  On waking from hibernation, the
1713	 * original page might be freed under memory pressure, then
1714	 * later read back in from swap, now with the wrong data.
1715	 *
1716	 * Hibernation suspends storage while it is writing the image
1717	 * to disk so check that here.
1718	 */
1719	if (pm_suspended_storage())
1720		return 0;
1721
1722	page = compound_head(page);
1723	delete_from_swap_cache(page);
1724	SetPageDirty(page);
1725	return 1;
1726}
1727
1728/*
1729 * Free the swap entry like above, but also try to
1730 * free the page cache entry if it is the last user.
1731 */
1732int free_swap_and_cache(swp_entry_t entry)
1733{
1734	struct swap_info_struct *p;
1735	unsigned char count;
1736
1737	if (non_swap_entry(entry))
1738		return 1;
1739
1740	p = _swap_info_get(entry);
1741	if (p) {
1742		count = __swap_entry_free(p, entry, 1);
1743		if (count == SWAP_HAS_CACHE &&
1744		    !swap_page_trans_huge_swapped(p, entry))
1745			__try_to_reclaim_swap(p, swp_offset(entry),
1746					      TTRS_UNMAPPED | TTRS_FULL);
1747	}
1748	return p != NULL;
1749}
1750
1751#ifdef CONFIG_HIBERNATION
1752/*
1753 * Find the swap type that corresponds to given device (if any).
1754 *
1755 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1756 * from 0, in which the swap header is expected to be located.
1757 *
1758 * This is needed for the suspend to disk (aka swsusp).
1759 */
1760int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1761{
1762	struct block_device *bdev = NULL;
1763	int type;
1764
1765	if (device)
1766		bdev = bdget(device);
1767
1768	spin_lock(&swap_lock);
1769	for (type = 0; type < nr_swapfiles; type++) {
1770		struct swap_info_struct *sis = swap_info[type];
1771
1772		if (!(sis->flags & SWP_WRITEOK))
1773			continue;
1774
1775		if (!bdev) {
1776			if (bdev_p)
1777				*bdev_p = bdgrab(sis->bdev);
1778
1779			spin_unlock(&swap_lock);
1780			return type;
1781		}
1782		if (bdev == sis->bdev) {
1783			struct swap_extent *se = first_se(sis);
1784
1785			if (se->start_block == offset) {
1786				if (bdev_p)
1787					*bdev_p = bdgrab(sis->bdev);
1788
1789				spin_unlock(&swap_lock);
1790				bdput(bdev);
1791				return type;
1792			}
1793		}
1794	}
1795	spin_unlock(&swap_lock);
1796	if (bdev)
1797		bdput(bdev);
1798
1799	return -ENODEV;
1800}
1801
1802/*
1803 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1804 * corresponding to given index in swap_info (swap type).
1805 */
1806sector_t swapdev_block(int type, pgoff_t offset)
1807{
1808	struct block_device *bdev;
1809	struct swap_info_struct *si = swap_type_to_swap_info(type);
1810
1811	if (!si || !(si->flags & SWP_WRITEOK))
1812		return 0;
1813	return map_swap_entry(swp_entry(type, offset), &bdev);
1814}
1815
1816/*
1817 * Return either the total number of swap pages of given type, or the number
1818 * of free pages of that type (depending on @free)
1819 *
1820 * This is needed for software suspend
1821 */
1822unsigned int count_swap_pages(int type, int free)
1823{
1824	unsigned int n = 0;
1825
1826	spin_lock(&swap_lock);
1827	if ((unsigned int)type < nr_swapfiles) {
1828		struct swap_info_struct *sis = swap_info[type];
1829
1830		spin_lock(&sis->lock);
1831		if (sis->flags & SWP_WRITEOK) {
1832			n = sis->pages;
1833			if (free)
1834				n -= sis->inuse_pages;
1835		}
1836		spin_unlock(&sis->lock);
1837	}
1838	spin_unlock(&swap_lock);
1839	return n;
1840}
1841#endif /* CONFIG_HIBERNATION */
1842
1843static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1844{
1845	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
1846}
1847
1848/*
1849 * No need to decide whether this PTE shares the swap entry with others,
1850 * just let do_wp_page work it out if a write is requested later - to
1851 * force COW, vm_page_prot omits write permission from any private vma.
1852 */
1853static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1854		unsigned long addr, swp_entry_t entry, struct page *page)
1855{
1856	struct page *swapcache;
1857	struct mem_cgroup *memcg;
1858	spinlock_t *ptl;
1859	pte_t *pte;
1860	int ret = 1;
1861
1862	swapcache = page;
1863	page = ksm_might_need_to_copy(page, vma, addr);
1864	if (unlikely(!page))
1865		return -ENOMEM;
1866
1867	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
1868				&memcg, false)) {
1869		ret = -ENOMEM;
1870		goto out_nolock;
1871	}
1872
1873	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1874	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1875		mem_cgroup_cancel_charge(page, memcg, false);
1876		ret = 0;
1877		goto out;
1878	}
1879
1880	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1881	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1882	get_page(page);
1883	set_pte_at(vma->vm_mm, addr, pte,
1884		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1885	if (page == swapcache) {
1886		page_add_anon_rmap(page, vma, addr, false);
1887		mem_cgroup_commit_charge(page, memcg, true, false);
1888	} else { /* ksm created a completely new copy */
1889		page_add_new_anon_rmap(page, vma, addr, false);
1890		mem_cgroup_commit_charge(page, memcg, false, false);
1891		lru_cache_add_active_or_unevictable(page, vma);
1892	}
1893	swap_free(entry);
1894	/*
1895	 * Move the page to the active list so it is not
1896	 * immediately swapped out again after swapon.
1897	 */
1898	activate_page(page);
1899out:
1900	pte_unmap_unlock(pte, ptl);
1901out_nolock:
1902	if (page != swapcache) {
1903		unlock_page(page);
1904		put_page(page);
1905	}
1906	return ret;
1907}
1908
1909static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1910			unsigned long addr, unsigned long end,
1911			unsigned int type, bool frontswap,
1912			unsigned long *fs_pages_to_unuse)
1913{
1914	struct page *page;
1915	swp_entry_t entry;
1916	pte_t *pte;
1917	struct swap_info_struct *si;
1918	unsigned long offset;
1919	int ret = 0;
1920	volatile unsigned char *swap_map;
1921
1922	si = swap_info[type];
1923	pte = pte_offset_map(pmd, addr);
1924	do {
1925		struct vm_fault vmf;
1926
1927		if (!is_swap_pte(*pte))
1928			continue;
1929
1930		entry = pte_to_swp_entry(*pte);
1931		if (swp_type(entry) != type)
1932			continue;
1933
1934		offset = swp_offset(entry);
1935		if (frontswap && !frontswap_test(si, offset))
1936			continue;
1937
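		/*
		 * Unmap the pte before swapin_readahead(), which can sleep;
		 * the map is re-established at try_next before the scan
		 * continues.
		 */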
1938		pte_unmap(pte);
1939		swap_map = &si->swap_map[offset];
1940		vmf.vma = vma;
1941		vmf.address = addr;
1942		vmf.pmd = pmd;
1943		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
1944		if (!page) {
1945			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1946				goto try_next;
1947			return -ENOMEM;
1948		}
1949
1950		lock_page(page);
1951		wait_on_page_writeback(page);
1952		ret = unuse_pte(vma, pmd, addr, entry, page);
1953		if (ret < 0) {
1954			unlock_page(page);
1955			put_page(page);
1956			goto out;
1957		}
1958
1959		try_to_free_swap(page);
1960		unlock_page(page);
1961		put_page(page);
1962
1963		if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
1964			ret = FRONTSWAP_PAGES_UNUSED;
1965			goto out;
1966		}
1967try_next:
1968		pte = pte_offset_map(pmd, addr);
1969	} while (pte++, addr += PAGE_SIZE, addr != end);
1970	pte_unmap(pte - 1);
1971
1972	ret = 0;
1973out:
1974	return ret;
1975}
1976
1977static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1978				unsigned long addr, unsigned long end,
1979				unsigned int type, bool frontswap,
1980				unsigned long *fs_pages_to_unuse)
1981{
1982	pmd_t *pmd;
1983	unsigned long next;
1984	int ret;
1985
1986	pmd = pmd_offset(pud, addr);
1987	do {
1988		cond_resched();
1989		next = pmd_addr_end(addr, end);
1990		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1991			continue;
1992		ret = unuse_pte_range(vma, pmd, addr, next, type,
1993				      frontswap, fs_pages_to_unuse);
1994		if (ret)
1995			return ret;
1996	} while (pmd++, addr = next, addr != end);
1997	return 0;
1998}
1999
2000static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2001				unsigned long addr, unsigned long end,
2002				unsigned int type, bool frontswap,
2003				unsigned long *fs_pages_to_unuse)
2004{
2005	pud_t *pud;
2006	unsigned long next;
2007	int ret;
2008
2009	pud = pud_offset(p4d, addr);
2010	do {
2011		next = pud_addr_end(addr, end);
2012		if (pud_none_or_clear_bad(pud))
2013			continue;
2014		ret = unuse_pmd_range(vma, pud, addr, next, type,
2015				      frontswap, fs_pages_to_unuse);
2016		if (ret)
2017			return ret;
2018	} while (pud++, addr = next, addr != end);
2019	return 0;
2020}
2021
2022static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2023				unsigned long addr, unsigned long end,
2024				unsigned int type, bool frontswap,
2025				unsigned long *fs_pages_to_unuse)
2026{
2027	p4d_t *p4d;
2028	unsigned long next;
2029	int ret;
2030
2031	p4d = p4d_offset(pgd, addr);
2032	do {
2033		next = p4d_addr_end(addr, end);
2034		if (p4d_none_or_clear_bad(p4d))
2035			continue;
2036		ret = unuse_pud_range(vma, p4d, addr, next, type,
2037				      frontswap, fs_pages_to_unuse);
2038		if (ret)
2039			return ret;
2040	} while (p4d++, addr = next, addr != end);
2041	return 0;
2042}
2043
2044static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2045		     bool frontswap, unsigned long *fs_pages_to_unuse)
2046{
2047	pgd_t *pgd;
2048	unsigned long addr, end, next;
2049	int ret;
2050
2051	addr = vma->vm_start;
2052	end = vma->vm_end;
2053
2054	pgd = pgd_offset(vma->vm_mm, addr);
2055	do {
2056		next = pgd_addr_end(addr, end);
2057		if (pgd_none_or_clear_bad(pgd))
2058			continue;
2059		ret = unuse_p4d_range(vma, pgd, addr, next, type,
2060				      frontswap, fs_pages_to_unuse);
2061		if (ret)
2062			return ret;
2063	} while (pgd++, addr = next, addr != end);
2064	return 0;
2065}
2066
2067static int unuse_mm(struct mm_struct *mm, unsigned int type,
2068		    bool frontswap, unsigned long *fs_pages_to_unuse)
2069{
2070	struct vm_area_struct *vma;
2071	int ret = 0;
2072
2073	down_read(&mm->mmap_sem);
2074	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2075		if (vma->anon_vma) {
2076			ret = unuse_vma(vma, type, frontswap,
2077					fs_pages_to_unuse);
2078			if (ret)
2079				break;
2080		}
2081		cond_resched();
2082	}
2083	up_read(&mm->mmap_sem);
2084	return ret;
2085}
2086
2087/*
2088 * Scan swap_map (or frontswap_map if frontswap parameter is true)
2089 * from current position to next entry still in use. Return 0
2090 * if there are no inuse entries after prev till end of the map.
2091 */
2092static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2093					unsigned int prev, bool frontswap)
2094{
2095	unsigned int i;
2096	unsigned char count;
2097
2098	/*
2099	 * No need for swap_lock here: we're just looking
2100	 * for whether an entry is in use, not modifying it; false
2101	 * hits are okay, and sys_swapoff() has already prevented new
2102	 * allocations from this area (while holding swap_lock).
2103	 */
2104	for (i = prev + 1; i < si->max; i++) {
2105		count = READ_ONCE(si->swap_map[i]);
2106		if (count && swap_count(count) != SWAP_MAP_BAD)
2107			if (!frontswap || frontswap_test(si, i))
2108				break;
2109		if ((i % LATENCY_LIMIT) == 0)
2110			cond_resched();
2111	}
2112
2113	if (i == si->max)
2114		i = 0;
2115
2116	return i;
2117}
2118
2119/*
2120 * If the boolean frontswap is true, only unuse pages_to_unuse pages;
2121 * pages_to_unuse==0 means all pages; ignored if frontswap is false
2122 */
2123int try_to_unuse(unsigned int type, bool frontswap,
2124		 unsigned long pages_to_unuse)
2125{
2126	struct mm_struct *prev_mm;
2127	struct mm_struct *mm;
2128	struct list_head *p;
2129	int retval = 0;
2130	struct swap_info_struct *si = swap_info[type];
2131	struct page *page;
2132	swp_entry_t entry;
2133	unsigned int i;
2134
2135	if (!si->inuse_pages)
2136		return 0;
2137
2138	if (!frontswap)
2139		pages_to_unuse = 0;
2140
2141retry:
2142	retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2143	if (retval)
2144		goto out;
2145
2146	prev_mm = &init_mm;
2147	mmget(prev_mm);
2148
2149	spin_lock(&mmlist_lock);
2150	p = &init_mm.mmlist;
2151	while (si->inuse_pages &&
2152	       !signal_pending(current) &&
2153	       (p = p->next) != &init_mm.mmlist) {
2154
2155		mm = list_entry(p, struct mm_struct, mmlist);
2156		if (!mmget_not_zero(mm))
2157			continue;
2158		spin_unlock(&mmlist_lock);
2159		mmput(prev_mm);
2160		prev_mm = mm;
2161		retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2162
2163		if (retval) {
2164			mmput(prev_mm);
2165			goto out;
2166		}
2167
2168		/*
2169		 * Make sure that we aren't completely killing
2170		 * interactive performance.
2171		 */
2172		cond_resched();
2173		spin_lock(&mmlist_lock);
2174	}
2175	spin_unlock(&mmlist_lock);
2176
2177	mmput(prev_mm);
2178
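	/*
	 * Entries left at this point should be held only by the swap
	 * cache; sweep the map and release those caches.
	 */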
2179	i = 0;
2180	while (si->inuse_pages &&
2181	       !signal_pending(current) &&
2182	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2183
2184		entry = swp_entry(type, i);
2185		page = find_get_page(swap_address_space(entry), i);
2186		if (!page)
2187			continue;
2188
2189		/*
2190		 * It is conceivable that a racing task removed this page from
2191		 * swap cache just before we acquired the page lock. The page
2192		 * might even be back in swap cache on another swap area. But
2193		 * that is okay, try_to_free_swap() only removes stale pages.
2194		 */
2195		lock_page(page);
2196		wait_on_page_writeback(page);
2197		try_to_free_swap(page);
2198		unlock_page(page);
2199		put_page(page);
2200
2201		/*
2202		 * For frontswap, we just need to unuse pages_to_unuse, if
2203		 * it was specified. Need not check frontswap again here as
2204		 * we already zeroed out pages_to_unuse if not frontswap.
2205		 */
2206		if (pages_to_unuse && --pages_to_unuse == 0)
2207			goto out;
2208	}
2209
2210	/*
2211	 * Let's check again to see if there are still swap entries in the map.
2212	 * If yes, we need to retry the unuse logic.
2213	 * Under global memory pressure, swap entries can be reinserted back
2214	 * into process space after the mmlist loop above passes over them.
2215	 *
2216	 * Limit the number of retries? No: when mmget_not_zero() above fails,
2217	 * that mm is likely to be freeing swap from exit_mmap(), which proceeds
2218	 * at its own independent pace; and even shmem_writepage() could have
2219	 * been preempted after get_swap_page(), temporarily hiding that swap.
2220	 * It's easy and robust (though cpu-intensive) just to keep retrying.
2221	 */
2222	if (si->inuse_pages) {
2223		if (!signal_pending(current))
2224			goto retry;
2225		retval = -EINTR;
2226	}
2227out:
2228	return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2229}
2230
2231/*
2232 * After a successful try_to_unuse, if no swap is now in use, we know
2233 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2234 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2235 * added to the mmlist just after page_duplicate - before would be racy.
2236 */
2237static void drain_mmlist(void)
2238{
2239	struct list_head *p, *next;
2240	unsigned int type;
2241
2242	for (type = 0; type < nr_swapfiles; type++)
2243		if (swap_info[type]->inuse_pages)
2244			return;
2245	spin_lock(&mmlist_lock);
2246	list_for_each_safe(p, next, &init_mm.mmlist)
2247		list_del_init(p);
2248	spin_unlock(&mmlist_lock);
2249}
2250
2251/*
2252 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
2253 * corresponds to page offset for the specified swap entry.
2254 * Note that the return type of this function is sector_t, but it returns
2255 * the page offset into the bdev, not the sector offset.
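 * (a caller that needs a 512-byte sector can shift the result left by
 * PAGE_SHIFT - 9)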
2256 */
2257static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
2258{
2259	struct swap_info_struct *sis;
2260	struct swap_extent *se;
2261	pgoff_t offset;
2262
2263	sis = swp_swap_info(entry);
2264	*bdev = sis->bdev;
2265
2266	offset = swp_offset(entry);
2267	se = offset_to_swap_extent(sis, offset);
2268	return se->start_block + (offset - se->start_page);
2269}
2270
2271/*
2272 * Returns the page offset into bdev for the specified page's swap entry.
2273 */
2274sector_t map_swap_page(struct page *page, struct block_device **bdev)
2275{
2276	swp_entry_t entry;
2277	entry.val = page_private(page);
2278	return map_swap_entry(entry, bdev);
2279}
2280
2281/*
2282 * Free all of a swapdev's extent information
2283 */
2284static void destroy_swap_extents(struct swap_info_struct *sis)
2285{
2286	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2287		struct rb_node *rb = sis->swap_extent_root.rb_node;
2288		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2289
2290		rb_erase(rb, &sis->swap_extent_root);
2291		kfree(se);
2292	}
2293
2294	if (sis->flags & SWP_ACTIVATED) {
2295		struct file *swap_file = sis->swap_file;
2296		struct address_space *mapping = swap_file->f_mapping;
2297
2298		sis->flags &= ~SWP_ACTIVATED;
2299		if (mapping->a_ops->swap_deactivate)
2300			mapping->a_ops->swap_deactivate(swap_file);
2301	}
2302}
2303
2304/*
2305 * Add a block range (and the corresponding page range) into this swapdev's
2306 * extent tree.
2307 *
2308 * This function rather assumes that it is called in ascending page order.
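 * For example, if pages 0-9 were added at blocks 100-109 and the next call
 * adds pages 10-19 at blocks 110-119, the new range merges into the
 * existing extent; a discontiguous block run starts a new rightmost node.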
2309 */
2310int
2311add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2312		unsigned long nr_pages, sector_t start_block)
2313{
2314	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2315	struct swap_extent *se;
2316	struct swap_extent *new_se;
2317
2318	/*
2319	 * Place the new node at the rightmost position, since the
2320	 * function is called in ascending page order.
2321	 */
2322	while (*link) {
2323		parent = *link;
2324		link = &parent->rb_right;
2325	}
2326
2327	if (parent) {
2328		se = rb_entry(parent, struct swap_extent, rb_node);
2329		BUG_ON(se->start_page + se->nr_pages != start_page);
2330		if (se->start_block + se->nr_pages == start_block) {
2331			/* Merge it */
2332			se->nr_pages += nr_pages;
2333			return 0;
2334		}
2335	}
2336
2337	/* No merge, insert a new extent. */
2338	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2339	if (new_se == NULL)
2340		return -ENOMEM;
2341	new_se->start_page = start_page;
2342	new_se->nr_pages = nr_pages;
2343	new_se->start_block = start_block;
2344
2345	rb_link_node(&new_se->rb_node, parent, link);
2346	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2347	return 1;
2348}
2349EXPORT_SYMBOL_GPL(add_swap_extent);
2350
2351/*
2352 * A `swap extent' is a simple thing which maps a contiguous range of pages
2353 * onto a contiguous range of disk blocks.  An ordered list of swap extents
2354 * is built at swapon time and is then used at swap_writepage/swap_readpage
2355 * time for locating where on disk a page belongs.
2356 *
2357 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2358 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2359 * swap files identically.
2360 *
2361 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2362 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2363 * swapfiles are handled *identically* after swapon time.
2364 *
2365 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2366 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2367 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2368 * requirements, they are simply tossed out - we will never use those blocks
2369 * for swapping.
2370 *
2371 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2372 * prevents users from writing to the swap device, which will corrupt memory.
2373 *
2374 * The amount of disk space which a single swap extent represents varies.
2375 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2376 * extents in the list.  To avoid much list walking, we cache the previous
2377 * search location in `curr_swap_extent', and start new searches from there.
2378 * This is extremely effective.  The average number of iterations in
2379 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
2380 */
2381static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2382{
2383	struct file *swap_file = sis->swap_file;
2384	struct address_space *mapping = swap_file->f_mapping;
2385	struct inode *inode = mapping->host;
2386	int ret;
2387
2388	if (S_ISBLK(inode->i_mode)) {
2389		ret = add_swap_extent(sis, 0, sis->max, 0);
2390		*span = sis->pages;
2391		return ret;
2392	}
2393
2394	if (mapping->a_ops->swap_activate) {
2395		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2396		if (ret >= 0)
2397			sis->flags |= SWP_ACTIVATED;
2398		if (!ret) {
2399			sis->flags |= SWP_FS;
2400			ret = add_swap_extent(sis, 0, sis->max, 0);
2401			*span = sis->pages;
2402		}
2403		return ret;
2404	}
2405
2406	return generic_swapfile_activate(sis, swap_file, span);
2407}
2408
2409static int swap_node(struct swap_info_struct *p)
2410{
2411	struct block_device *bdev;
2412
2413	if (p->bdev)
2414		bdev = p->bdev;
2415	else
2416		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2417
2418	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2419}
2420
2421static void setup_swap_info(struct swap_info_struct *p, int prio,
2422			    unsigned char *swap_map,
2423			    struct swap_cluster_info *cluster_info)
2424{
2425	int i;
2426
2427	if (prio >= 0)
2428		p->prio = prio;
2429	else
2430		p->prio = --least_priority;
2431	/*
2432	 * the plist prio is negated because plist ordering is
2433	 * low-to-high, while swap ordering is high-to-low
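	 * (e.g. a swap priority of 10 becomes plist priority -10, so
	 * higher swap priorities sort toward the head of the plist)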
2434	 */
2435	p->list.prio = -p->prio;
2436	for_each_node(i) {
2437		if (p->prio >= 0)
2438			p->avail_lists[i].prio = -p->prio;
2439		else {
2440			if (swap_node(p) == i)
2441				p->avail_lists[i].prio = 1;
2442			else
2443				p->avail_lists[i].prio = -p->prio;
2444		}
2445	}
2446	p->swap_map = swap_map;
2447	p->cluster_info = cluster_info;
2448}
2449
2450static void _enable_swap_info(struct swap_info_struct *p)
2451{
2452	p->flags |= SWP_WRITEOK | SWP_VALID;
2453	atomic_long_add(p->pages, &nr_swap_pages);
2454	total_swap_pages += p->pages;
2455
2456	assert_spin_locked(&swap_lock);
2457	/*
2458	 * both lists are plists, and thus priority ordered.
2459	 * swap_active_head needs to be priority ordered for swapoff(),
2460	 * which on removal of any swap_info_struct with an auto-assigned
2461	 * (i.e. negative) priority increments the auto-assigned priority
2462	 * of any lower-priority swap_info_structs.
2463	 * swap_avail_head needs to be priority ordered for get_swap_page(),
2464	 * which allocates swap pages from the highest available priority
2465	 * swap_info_struct.
2466	 */
2467	plist_add(&p->list, &swap_active_head);
2468	add_to_avail_list(p);
2469}
2470
2471static void enable_swap_info(struct swap_info_struct *p, int prio,
2472				unsigned char *swap_map,
2473				struct swap_cluster_info *cluster_info,
2474				unsigned long *frontswap_map)
2475{
2476	frontswap_init(p->type, frontswap_map);
2477	spin_lock(&swap_lock);
2478	spin_lock(&p->lock);
2479	setup_swap_info(p, prio, swap_map, cluster_info);
2480	spin_unlock(&p->lock);
2481	spin_unlock(&swap_lock);
2482	/*
2483	 * Guarantee swap_map, cluster_info, etc. fields are valid
2484	 * between get/put_swap_device() if SWP_VALID bit is set
2485	 */
2486	synchronize_rcu();
2487	spin_lock(&swap_lock);
2488	spin_lock(&p->lock);
2489	_enable_swap_info(p);
2490	spin_unlock(&p->lock);
2491	spin_unlock(&swap_lock);
2492}
2493
2494static void reinsert_swap_info(struct swap_info_struct *p)
2495{
2496	spin_lock(&swap_lock);
2497	spin_lock(&p->lock);
2498	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2499	_enable_swap_info(p);
2500	spin_unlock(&p->lock);
2501	spin_unlock(&swap_lock);
2502}
2503
2504bool has_usable_swap(void)
2505{
2506	bool ret = true;
2507
2508	spin_lock(&swap_lock);
2509	if (plist_head_empty(&swap_active_head))
2510		ret = false;
2511	spin_unlock(&swap_lock);
2512	return ret;
2513}
2514
2515SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2516{
2517	struct swap_info_struct *p = NULL;
2518	unsigned char *swap_map;
2519	struct swap_cluster_info *cluster_info;
2520	unsigned long *frontswap_map;
2521	struct file *swap_file, *victim;
2522	struct address_space *mapping;
2523	struct inode *inode;
2524	struct filename *pathname;
2525	int err, found = 0;
2526	unsigned int old_block_size;
2527
2528	if (!capable(CAP_SYS_ADMIN))
2529		return -EPERM;
2530
2531	BUG_ON(!current->mm);
2532
2533	pathname = getname(specialfile);
2534	if (IS_ERR(pathname))
2535		return PTR_ERR(pathname);
2536
2537	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2538	err = PTR_ERR(victim);
2539	if (IS_ERR(victim))
2540		goto out;
2541
2542	mapping = victim->f_mapping;
2543	spin_lock(&swap_lock);
2544	plist_for_each_entry(p, &swap_active_head, list) {
2545		if (p->flags & SWP_WRITEOK) {
2546			if (p->swap_file->f_mapping == mapping) {
2547				found = 1;
2548				break;
2549			}
2550		}
2551	}
2552	if (!found) {
2553		err = -EINVAL;
2554		spin_unlock(&swap_lock);
2555		goto out_dput;
2556	}
2557	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2558		vm_unacct_memory(p->pages);
2559	else {
2560		err = -ENOMEM;
2561		spin_unlock(&swap_lock);
2562		goto out_dput;
2563	}
2564	del_from_avail_list(p);
2565	spin_lock(&p->lock);
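	/*
	 * Removing a device with an auto-assigned (negative) priority:
	 * bump each lower-priority device up by one so the auto-assigned
	 * priorities stay dense.
	 */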
2566	if (p->prio < 0) {
2567		struct swap_info_struct *si = p;
2568		int nid;
2569
2570		plist_for_each_entry_continue(si, &swap_active_head, list) {
2571			si->prio++;
2572			si->list.prio--;
2573			for_each_node(nid) {
2574				if (si->avail_lists[nid].prio != 1)
2575					si->avail_lists[nid].prio--;
2576			}
2577		}
2578		least_priority++;
2579	}
2580	plist_del(&p->list, &swap_active_head);
2581	atomic_long_sub(p->pages, &nr_swap_pages);
2582	total_swap_pages -= p->pages;
2583	p->flags &= ~SWP_WRITEOK;
2584	spin_unlock(&p->lock);
2585	spin_unlock(&swap_lock);
2586
2587	disable_swap_slots_cache_lock();
2588
2589	set_current_oom_origin();
2590	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2591	clear_current_oom_origin();
2592
2593	if (err) {
2594		/* re-insert swap space into swap_list */
2595		reinsert_swap_info(p);
2596		reenable_swap_slots_cache_unlock();
2597		goto out_dput;
2598	}
2599
2600	reenable_swap_slots_cache_unlock();
2601
2602	spin_lock(&swap_lock);
2603	spin_lock(&p->lock);
2604	p->flags &= ~SWP_VALID;		/* mark swap device as invalid */
2605	spin_unlock(&p->lock);
2606	spin_unlock(&swap_lock);
2607	/*
2608	 * wait for swap operations protected by get/put_swap_device()
2609	 * to complete
2610	 */
2611	synchronize_rcu();
2612
2613	flush_work(&p->discard_work);
2614
2615	destroy_swap_extents(p);
2616	if (p->flags & SWP_CONTINUED)
2617		free_swap_count_continuations(p);
2618
2619	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2620		atomic_dec(&nr_rotate_swap);
2621
2622	mutex_lock(&swapon_mutex);
2623	spin_lock(&swap_lock);
2624	spin_lock(&p->lock);
2625	drain_mmlist();
2626
2627	/* wait for anyone still in scan_swap_map */
2628	p->highest_bit = 0;		/* cuts scans short */
2629	while (p->flags >= SWP_SCANNING) {
2630		spin_unlock(&p->lock);
2631		spin_unlock(&swap_lock);
2632		schedule_timeout_uninterruptible(1);
2633		spin_lock(&swap_lock);
2634		spin_lock(&p->lock);
2635	}
2636
2637	swap_file = p->swap_file;
2638	old_block_size = p->old_block_size;
2639	p->swap_file = NULL;
2640	p->max = 0;
2641	swap_map = p->swap_map;
2642	p->swap_map = NULL;
2643	cluster_info = p->cluster_info;
2644	p->cluster_info = NULL;
2645	frontswap_map = frontswap_map_get(p);
2646	spin_unlock(&p->lock);
2647	spin_unlock(&swap_lock);
2648	frontswap_invalidate_area(p->type);
2649	frontswap_map_set(p, NULL);
2650	mutex_unlock(&swapon_mutex);
2651	free_percpu(p->percpu_cluster);
2652	p->percpu_cluster = NULL;
2653	vfree(swap_map);
2654	kvfree(cluster_info);
2655	kvfree(frontswap_map);
2656	/* Destroy swap account information */
2657	swap_cgroup_swapoff(p->type);
2658	exit_swap_address_space(p->type);
2659
2660	inode = mapping->host;
2661	if (S_ISBLK(inode->i_mode)) {
2662		struct block_device *bdev = I_BDEV(inode);
2663
2664		set_blocksize(bdev, old_block_size);
2665		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2666	}
2667
2668	inode_lock(inode);
2669	inode->i_flags &= ~S_SWAPFILE;
2670	inode_unlock(inode);
2671	filp_close(swap_file, NULL);
2672
2673	/*
2674	 * Clear the SWP_USED flag after all resources are freed so that swapon
2675	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2676	 * not hold p->lock after we cleared its SWP_WRITEOK.
2677	 */
2678	spin_lock(&swap_lock);
2679	p->flags = 0;
2680	spin_unlock(&swap_lock);
2681
2682	err = 0;
2683	atomic_inc(&proc_poll_event);
2684	wake_up_interruptible(&proc_poll_wait);
2685
2686out_dput:
2687	filp_close(victim, NULL);
2688out:
2689	putname(pathname);
2690	return err;
2691}
2692
2693#ifdef CONFIG_PROC_FS
2694static __poll_t swaps_poll(struct file *file, poll_table *wait)
2695{
2696	struct seq_file *seq = file->private_data;
2697
2698	poll_wait(file, &proc_poll_wait, wait);
2699
2700	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2701		seq->poll_event = atomic_read(&proc_poll_event);
2702		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2703	}
2704
2705	return EPOLLIN | EPOLLRDNORM;
2706}
2707
2708/* iterator */
2709static void *swap_start(struct seq_file *swap, loff_t *pos)
2710{
2711	struct swap_info_struct *si;
2712	int type;
2713	loff_t l = *pos;
2714
2715	mutex_lock(&swapon_mutex);
2716
2717	if (!l)
2718		return SEQ_START_TOKEN;
2719
2720	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2721		if (!(si->flags & SWP_USED) || !si->swap_map)
2722			continue;
2723		if (!--l)
2724			return si;
2725	}
2726
2727	return NULL;
2728}
2729
2730static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2731{
2732	struct swap_info_struct *si = v;
2733	int type;
2734
2735	if (v == SEQ_START_TOKEN)
2736		type = 0;
2737	else
2738		type = si->type + 1;
2739
2740	for (; (si = swap_type_to_swap_info(type)); type++) {
2741		if (!(si->flags & SWP_USED) || !si->swap_map)
2742			continue;
2743		++*pos;
2744		return si;
2745	}
2746
2747	return NULL;
2748}
2749
2750static void swap_stop(struct seq_file *swap, void *v)
2751{
2752	mutex_unlock(&swapon_mutex);
2753}
2754
2755static int swap_show(struct seq_file *swap, void *v)
2756{
2757	struct swap_info_struct *si = v;
2758	struct file *file;
2759	int len;
2760
2761	if (si == SEQ_START_TOKEN) {
2762		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2763		return 0;
2764	}
2765
2766	file = si->swap_file;
2767	len = seq_file_path(swap, file, " \t\n\\");
2768	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2769			len < 40 ? 40 - len : 1, " ",
2770			S_ISBLK(file_inode(file)->i_mode) ?
2771				"partition" : "file\t",
2772			si->pages << (PAGE_SHIFT - 10),
2773			si->inuse_pages << (PAGE_SHIFT - 10),
2774			si->prio);
2775	return 0;
2776}
2777
2778static const struct seq_operations swaps_op = {
2779	.start =	swap_start,
2780	.next =		swap_next,
2781	.stop =		swap_stop,
2782	.show =		swap_show
2783};
2784
2785static int swaps_open(struct inode *inode, struct file *file)
2786{
2787	struct seq_file *seq;
2788	int ret;
2789
2790	ret = seq_open(file, &swaps_op);
2791	if (ret)
2792		return ret;
2793
2794	seq = file->private_data;
2795	seq->poll_event = atomic_read(&proc_poll_event);
2796	return 0;
2797}
2798
2799static const struct file_operations proc_swaps_operations = {
2800	.open		= swaps_open,
2801	.read		= seq_read,
2802	.llseek		= seq_lseek,
2803	.release	= seq_release,
2804	.poll		= swaps_poll,
2805};
2806
2807static int __init procswaps_init(void)
2808{
2809	proc_create("swaps", 0, NULL, &proc_swaps_operations);
2810	return 0;
2811}
2812__initcall(procswaps_init);
2813#endif /* CONFIG_PROC_FS */
2814
2815#ifdef MAX_SWAPFILES_CHECK
2816static int __init max_swapfiles_check(void)
2817{
2818	MAX_SWAPFILES_CHECK();
2819	return 0;
2820}
2821late_initcall(max_swapfiles_check);
2822#endif
2823
2824static struct swap_info_struct *alloc_swap_info(void)
2825{
2826	struct swap_info_struct *p;
2827	unsigned int type;
2828	int i;
2829
2830	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2831	if (!p)
2832		return ERR_PTR(-ENOMEM);
2833
2834	spin_lock(&swap_lock);
2835	for (type = 0; type < nr_swapfiles; type++) {
2836		if (!(swap_info[type]->flags & SWP_USED))
2837			break;
2838	}
2839	if (type >= MAX_SWAPFILES) {
2840		spin_unlock(&swap_lock);
2841		kvfree(p);
2842		return ERR_PTR(-EPERM);
2843	}
2844	if (type >= nr_swapfiles) {
2845		p->type = type;
2846		WRITE_ONCE(swap_info[type], p);
2847		/*
2848		 * Write swap_info[type] before nr_swapfiles, in case a
2849		 * racing procfs swap_start() or swap_next() is reading them.
2850		 * (We never shrink nr_swapfiles, we never free this entry.)
2851		 */
2852		smp_wmb();
2853		WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
2854	} else {
2855		kvfree(p);
2856		p = swap_info[type];
2857		/*
2858		 * Do not memset this entry: a racing procfs swap_next()
2859		 * would be relying on p->type to remain valid.
2860		 */
2861	}
2862	p->swap_extent_root = RB_ROOT;
2863	plist_node_init(&p->list, 0);
2864	for_each_node(i)
2865		plist_node_init(&p->avail_lists[i], 0);
2866	p->flags = SWP_USED;
2867	spin_unlock(&swap_lock);
2868	spin_lock_init(&p->lock);
2869	spin_lock_init(&p->cont_lock);
2870
2871	return p;
2872}
2873
2874static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2875{
2876	int error;
2877
2878	if (S_ISBLK(inode->i_mode)) {
2879		p->bdev = bdgrab(I_BDEV(inode));
2880		error = blkdev_get(p->bdev,
2881				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2882		if (error < 0) {
2883			p->bdev = NULL;
2884			return error;
2885		}
2886		p->old_block_size = block_size(p->bdev);
2887		error = set_blocksize(p->bdev, PAGE_SIZE);
2888		if (error < 0)
2889			return error;
2890		p->flags |= SWP_BLKDEV;
2891	} else if (S_ISREG(inode->i_mode)) {
2892		p->bdev = inode->i_sb->s_bdev;
2893	}
2894
2895	inode_lock(inode);
2896	if (IS_SWAPFILE(inode))
2897		return -EBUSY;
2898
2899	return 0;
2900}
2901
2902
2903/*
2904 * Find out how many pages are allowed for a single swap device. There
2905 * are two limiting factors:
2906 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2907 * 2) the number of bits in the swap pte, as defined by the different
2908 * architectures.
2909 *
2910 * In order to find the largest possible bit mask, a swap entry with
2911 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2912 * decoded to a swp_entry_t again, and finally the swap offset is
2913 * extracted.
2914 *
2915 * This will mask all the bits from the initial ~0UL mask that can't
2916 * be encoded in either the swp_entry_t or the architecture definition
2917 * of a swap pte.
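 *
 * For example, if an architecture's swap pte could encode only a 50-bit
 * offset, the round trip would clear the higher bits and this function
 * would return 2^50 (pages).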
2918 */
2919unsigned long generic_max_swapfile_size(void)
2920{
2921	return swp_offset(pte_to_swp_entry(
2922			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2923}
2924
2925/* Can be overridden by an architecture for additional checks. */
2926__weak unsigned long max_swapfile_size(void)
2927{
2928	return generic_max_swapfile_size();
2929}
2930
2931static unsigned long read_swap_header(struct swap_info_struct *p,
2932					union swap_header *swap_header,
2933					struct inode *inode)
2934{
2935	int i;
2936	unsigned long maxpages;
2937	unsigned long swapfilepages;
2938	unsigned long last_page;
2939
2940	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2941		pr_err("Unable to find swap-space signature\n");
2942		return 0;
2943	}
2944
2945	/* swap partition endianness hack... */
2946	if (swab32(swap_header->info.version) == 1) {
2947		swab32s(&swap_header->info.version);
2948		swab32s(&swap_header->info.last_page);
2949		swab32s(&swap_header->info.nr_badpages);
2950		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2951			return 0;
2952		for (i = 0; i < swap_header->info.nr_badpages; i++)
2953			swab32s(&swap_header->info.badpages[i]);
2954	}
2955	/* Check the swap header's sub-version */
2956	if (swap_header->info.version != 1) {
2957		pr_warn("Unable to handle swap header version %d\n",
2958			swap_header->info.version);
2959		return 0;
2960	}
2961
2962	p->lowest_bit  = 1;
2963	p->cluster_next = 1;
2964	p->cluster_nr = 0;
2965
2966	maxpages = max_swapfile_size();
2967	last_page = swap_header->info.last_page;
2968	if (!last_page) {
2969		pr_warn("Empty swap-file\n");
2970		return 0;
2971	}
2972	if (last_page > maxpages) {
2973		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2974			maxpages << (PAGE_SHIFT - 10),
2975			last_page << (PAGE_SHIFT - 10));
2976	}
2977	if (maxpages > last_page) {
2978		maxpages = last_page + 1;
2979		/* p->max is an unsigned int: don't overflow it */
2980		if ((unsigned int)maxpages == 0)
2981			maxpages = UINT_MAX;
2982	}
2983	p->highest_bit = maxpages - 1;
2984
2985	if (!maxpages)
2986		return 0;
2987	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2988	if (swapfilepages && maxpages > swapfilepages) {
2989		pr_warn("Swap area shorter than signature indicates\n");
2990		return 0;
2991	}
2992	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2993		return 0;
2994	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2995		return 0;
2996
2997	return maxpages;
2998}
2999
3000#define SWAP_CLUSTER_INFO_COLS						\
3001	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3002#define SWAP_CLUSTER_SPACE_COLS						\
3003	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3004#define SWAP_CLUSTER_COLS						\
3005	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3006
3007static int setup_swap_map_and_extents(struct swap_info_struct *p,
3008					union swap_header *swap_header,
3009					unsigned char *swap_map,
3010					struct swap_cluster_info *cluster_info,
3011					unsigned long maxpages,
3012					sector_t *span)
3013{
3014	unsigned int j, k;
3015	unsigned int nr_good_pages;
3016	int nr_extents;
3017	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3018	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3019	unsigned long i, idx;
3020
3021	nr_good_pages = maxpages - 1;	/* omit header page */
3022
3023	cluster_list_init(&p->free_clusters);
3024	cluster_list_init(&p->discard_clusters);
3025
3026	for (i = 0; i < swap_header->info.nr_badpages; i++) {
3027		unsigned int page_nr = swap_header->info.badpages[i];
3028		if (page_nr == 0 || page_nr > swap_header->info.last_page)
3029			return -EINVAL;
3030		if (page_nr < maxpages) {
3031			swap_map[page_nr] = SWAP_MAP_BAD;
3032			nr_good_pages--;
3033			/*
3034			 * Haven't marked the cluster free yet, no list
3035			 * operation involved
3036			 */
3037			inc_cluster_info_page(p, cluster_info, page_nr);
3038		}
3039	}
3040
3041	/* Haven't marked the cluster free yet, no list operation involved */
3042	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3043		inc_cluster_info_page(p, cluster_info, i);
3044
3045	if (nr_good_pages) {
3046		swap_map[0] = SWAP_MAP_BAD;
3047		/*
3048		 * Haven't marked the cluster free yet, no list
3049		 * operation involved
3050		 */
3051		inc_cluster_info_page(p, cluster_info, 0);
3052		p->max = maxpages;
3053		p->pages = nr_good_pages;
3054		nr_extents = setup_swap_extents(p, span);
3055		if (nr_extents < 0)
3056			return nr_extents;
3057		nr_good_pages = p->pages;
3058	}
3059	if (!nr_good_pages) {
3060		pr_warn("Empty swap-file\n");
3061		return -EINVAL;
3062	}
3063
3064	if (!cluster_info)
3065		return nr_extents;
3066
3067
3068	/*
3069	 * Reduce false cache line sharing between cluster_info entries
3070	 * and between clusters sharing the same swap address space.
3071	 */
3072	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3073		j = (k + col) % SWAP_CLUSTER_COLS;
3074		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3075			idx = i * SWAP_CLUSTER_COLS + j;
3076			if (idx >= nr_clusters)
3077				continue;
3078			if (cluster_count(&cluster_info[idx]))
3079				continue;
3080			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3081			cluster_list_add_tail(&p->free_clusters, cluster_info,
3082					      idx);
3083		}
3084	}
3085	return nr_extents;
3086}
3087
3088/*
3089 * Helper to sys_swapon determining if a given swap
3090 * backing device queue supports DISCARD operations.
3091 */
3092static bool swap_discardable(struct swap_info_struct *si)
3093{
3094	struct request_queue *q = bdev_get_queue(si->bdev);
3095
3096	if (!q || !blk_queue_discard(q))
3097		return false;
3098
3099	return true;
3100}
3101
3102SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3103{
3104	struct swap_info_struct *p;
3105	struct filename *name;
3106	struct file *swap_file = NULL;
3107	struct address_space *mapping;
3108	int prio;
3109	int error;
3110	union swap_header *swap_header;
3111	int nr_extents;
3112	sector_t span;
3113	unsigned long maxpages;
3114	unsigned char *swap_map = NULL;
3115	struct swap_cluster_info *cluster_info = NULL;
3116	unsigned long *frontswap_map = NULL;
3117	struct page *page = NULL;
3118	struct inode *inode = NULL;
3119	bool inced_nr_rotate_swap = false;
3120
3121	if (swap_flags & ~SWAP_FLAGS_VALID)
3122		return -EINVAL;
3123
3124	if (!capable(CAP_SYS_ADMIN))
3125		return -EPERM;
3126
3127	if (!swap_avail_heads)
3128		return -ENOMEM;
3129
3130	p = alloc_swap_info();
3131	if (IS_ERR(p))
3132		return PTR_ERR(p);
3133
3134	INIT_WORK(&p->discard_work, swap_discard_work);
3135
3136	name = getname(specialfile);
3137	if (IS_ERR(name)) {
3138		error = PTR_ERR(name);
3139		name = NULL;
3140		goto bad_swap;
3141	}
3142	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3143	if (IS_ERR(swap_file)) {
3144		error = PTR_ERR(swap_file);
3145		swap_file = NULL;
3146		goto bad_swap;
3147	}
3148
3149	p->swap_file = swap_file;
3150	mapping = swap_file->f_mapping;
3151	inode = mapping->host;
3152
3153	/* claim_swapfile() takes inode_lock(inode) */
3154	error = claim_swapfile(p, inode);
3155	if (unlikely(error))
3156		goto bad_swap;
3157
3158	/*
3159	 * Read the swap header.
3160	 */
3161	if (!mapping->a_ops->readpage) {
3162		error = -EINVAL;
3163		goto bad_swap;
3164	}
3165	page = read_mapping_page(mapping, 0, swap_file);
3166	if (IS_ERR(page)) {
3167		error = PTR_ERR(page);
3168		goto bad_swap;
3169	}
3170	swap_header = kmap(page);
3171
3172	maxpages = read_swap_header(p, swap_header, inode);
3173	if (unlikely(!maxpages)) {
3174		error = -EINVAL;
3175		goto bad_swap;
3176	}
3177
3178	/* OK, set up the swap map and apply the bad block list */
3179	swap_map = vzalloc(maxpages);
3180	if (!swap_map) {
3181		error = -ENOMEM;
3182		goto bad_swap;
3183	}
3184
3185	if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
3186		p->flags |= SWP_STABLE_WRITES;
3187
3188	if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
3189		p->flags |= SWP_SYNCHRONOUS_IO;
3190
3191	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3192		int cpu;
3193		unsigned long ci, nr_cluster;
3194
3195		p->flags |= SWP_SOLIDSTATE;
3196		/*
3197		 * select a random position to start with, to help with
3198		 * wear leveling on SSDs
3199		 */
3200		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
3201		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3202
3203		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3204					GFP_KERNEL);
3205		if (!cluster_info) {
3206			error = -ENOMEM;
3207			goto bad_swap;
3208		}
3209
3210		for (ci = 0; ci < nr_cluster; ci++)
3211			spin_lock_init(&((cluster_info + ci)->lock));
3212
3213		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3214		if (!p->percpu_cluster) {
3215			error = -ENOMEM;
3216			goto bad_swap;
3217		}
3218		for_each_possible_cpu(cpu) {
3219			struct percpu_cluster *cluster;
3220			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3221			cluster_set_null(&cluster->index);
3222		}
3223	} else {
3224		atomic_inc(&nr_rotate_swap);
3225		inced_nr_rotate_swap = true;
3226	}
3227
3228	error = swap_cgroup_swapon(p->type, maxpages);
3229	if (error)
3230		goto bad_swap;
3231
3232	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3233		cluster_info, maxpages, &span);
3234	if (unlikely(nr_extents < 0)) {
3235		error = nr_extents;
3236		goto bad_swap;
3237	}
3238	/* frontswap enabled? set up bit-per-page map for frontswap */
3239	if (IS_ENABLED(CONFIG_FRONTSWAP))
3240		frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3241					 sizeof(long),
3242					 GFP_KERNEL);
3243
3244	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3245		/*
3246		 * When discard is enabled for swap with no particular
3247		 * policy flagged, we set all swap discard flags here in
3248		 * order to sustain backward compatibility with older
3249		 * swapon(8) releases.
3250		 */
3251		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3252			     SWP_PAGE_DISCARD);
3253
3254		/*
3255		 * By flagging sys_swapon, a sysadmin can tell us to
3256		 * either do single-time area discards only, or to just
3257		 * perform discards for released swap page-clusters.
3258		 * Now it's time to adjust the p->flags accordingly.
3259		 */
3260		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3261			p->flags &= ~SWP_PAGE_DISCARD;
3262		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3263			p->flags &= ~SWP_AREA_DISCARD;
3264
3265		/* issue a swapon-time discard if it's still required */
3266		if (p->flags & SWP_AREA_DISCARD) {
3267			int err = discard_swap(p);
3268			if (unlikely(err))
3269				pr_err("swapon: discard_swap(%p): %d\n",
3270					p, err);
3271		}
3272	}
3273
3274	error = init_swap_address_space(p->type, maxpages);
3275	if (error)
3276		goto bad_swap;
3277
3278	/*
3279	 * Flush any pending IO and dirty mappings before we start using this
3280	 * swap device.
3281	 */
3282	inode->i_flags |= S_SWAPFILE;
3283	error = inode_drain_writes(inode);
3284	if (error) {
3285		inode->i_flags &= ~S_SWAPFILE;
3286		goto bad_swap;
3287	}
3288
3289	mutex_lock(&swapon_mutex);
3290	prio = -1;
3291	if (swap_flags & SWAP_FLAG_PREFER)
3292		prio =
3293		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3294	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3295
3296	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3297		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3298		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3299		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3300		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3301		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3302		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3303		(frontswap_map) ? "FS" : "");
3304
3305	mutex_unlock(&swapon_mutex);
3306	atomic_inc(&proc_poll_event);
3307	wake_up_interruptible(&proc_poll_wait);
3308
3309	error = 0;
3310	goto out;
3311bad_swap:
3312	free_percpu(p->percpu_cluster);
3313	p->percpu_cluster = NULL;
3314	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3315		set_blocksize(p->bdev, p->old_block_size);
3316		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3317	}
3318	destroy_swap_extents(p);
3319	swap_cgroup_swapoff(p->type);
3320	spin_lock(&swap_lock);
3321	p->swap_file = NULL;
3322	p->flags = 0;
3323	spin_unlock(&swap_lock);
3324	vfree(swap_map);
3325	kvfree(cluster_info);
3326	kvfree(frontswap_map);
3327	if (inced_nr_rotate_swap)
3328		atomic_dec(&nr_rotate_swap);
3329	if (swap_file) {
3330		if (inode) {
3331			inode_unlock(inode);
3332			inode = NULL;
3333		}
3334		filp_close(swap_file, NULL);
3335	}
3336out:
3337	if (page && !IS_ERR(page)) {
3338		kunmap(page);
3339		put_page(page);
3340	}
3341	if (name)
3342		putname(name);
3343	if (inode)
3344		inode_unlock(inode);
3345	if (!error)
3346		enable_swap_slots_cache();
3347	return error;
3348}
3349
3350void si_swapinfo(struct sysinfo *val)
3351{
3352	unsigned int type;
3353	unsigned long nr_to_be_unused = 0;
3354
3355	spin_lock(&swap_lock);
3356	for (type = 0; type < nr_swapfiles; type++) {
3357		struct swap_info_struct *si = swap_info[type];
3358
3359		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3360			nr_to_be_unused += si->inuse_pages;
3361	}
3362	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3363	val->totalswap = total_swap_pages + nr_to_be_unused;
3364	spin_unlock(&swap_lock);
3365}
3366
3367/*
3368 * Verify that a swap entry is valid and increment its swap map count.
3369 *
3370 * Returns error code in following case.
3371 * - success -> 0
3372 * - swp_entry is invalid -> EINVAL
3373 * - swp_entry is migration entry -> EINVAL
3374 * - swap-cache reference is requested but there is already one. -> EEXIST
3375 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3376 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3377 */
3378static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3379{
3380	struct swap_info_struct *p;
3381	struct swap_cluster_info *ci;
3382	unsigned long offset;
3383	unsigned char count;
3384	unsigned char has_cache;
3385	int err = -EINVAL;
3386
3387	p = get_swap_device(entry);
3388	if (!p)
3389		goto out;
3390
3391	offset = swp_offset(entry);
3392	ci = lock_cluster_or_swap_info(p, offset);
3393
3394	count = p->swap_map[offset];
3395
3396	/*
3397	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3398	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3399	 */
3400	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3401		err = -ENOENT;
3402		goto unlock_out;
3403	}
3404
3405	has_cache = count & SWAP_HAS_CACHE;
3406	count &= ~SWAP_HAS_CACHE;
3407	err = 0;
3408
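	/*
	 * count and has_cache, split from the map byte above, are adjusted
	 * below and recombined into p->swap_map on the way out.
	 */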
3409	if (usage == SWAP_HAS_CACHE) {
3410
3411		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3412		if (!has_cache && count)
3413			has_cache = SWAP_HAS_CACHE;
3414		else if (has_cache)		/* someone else added cache */
3415			err = -EEXIST;
3416		else				/* no users remaining */
3417			err = -ENOENT;
3418
3419	} else if (count || has_cache) {
3420
3421		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3422			count += usage;
3423		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3424			err = -EINVAL;
3425		else if (swap_count_continued(p, offset, count))
3426			count = COUNT_CONTINUED;
3427		else
3428			err = -ENOMEM;
3429	} else
3430		err = -ENOENT;			/* unused swap entry */
3431
3432	p->swap_map[offset] = count | has_cache;
3433
3434unlock_out:
3435	unlock_cluster_or_swap_info(p, ci);
3436out:
3437	if (p)
3438		put_swap_device(p);
3439	return err;
3440}
3441
3442/*
3443 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3444 * (in which case its reference count is never incremented).
3445 */
3446void swap_shmem_alloc(swp_entry_t entry)
3447{
3448	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3449}
3450
3451/*
3452 * Increase reference count of swap entry by 1.
3453 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3454 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3455 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3456 * might occur if a page table entry has got corrupted.
3457 */
3458int swap_duplicate(swp_entry_t entry)
3459{
3460	int err = 0;
3461
3462	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3463		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3464	return err;
3465}
3466
3467/*
3468 * @entry: swap entry for which we allocate swap cache.
3469 *
3470 * Called when allocating swap cache for an existing swap entry.
3471 * This can return error codes. Returns 0 on success.
3472 * -EEXIST means there is already a swap cache.
3473 * Note: return code is different from swap_duplicate().
3474 */
3475int swapcache_prepare(swp_entry_t entry)
3476{
3477	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3478}
3479
3480struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3481{
3482	return swap_type_to_swap_info(swp_type(entry));
3483}
3484
3485struct swap_info_struct *page_swap_info(struct page *page)
3486{
3487	swp_entry_t entry = { .val = page_private(page) };
3488	return swp_swap_info(entry);
3489}
3490
3491/*
3492 * out-of-line __page_file_ methods to avoid include hell.
3493 */
3494struct address_space *__page_file_mapping(struct page *page)
3495{
3496	return page_swap_info(page)->swap_file->f_mapping;
3497}
3498EXPORT_SYMBOL_GPL(__page_file_mapping);
3499
3500pgoff_t __page_file_index(struct page *page)
3501{
3502	swp_entry_t swap = { .val = page_private(page) };
3503	return swp_offset(swap);
3504}
3505EXPORT_SYMBOL_GPL(__page_file_index);
3506
3507/*
3508 * add_swap_count_continuation - called when a swap count is duplicated
3509 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3510 * page of the original vmalloc'ed swap_map, to hold the continuation count
3511 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3512 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3513 *
3514 * These continuation pages are seldom referenced: the common paths all work
3515 * on the original swap_map, only referring to a continuation page when the
3516 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3517 *
3518 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3519 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3520 * can be called after dropping locks.
3521 */
3522int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3523{
3524	struct swap_info_struct *si;
3525	struct swap_cluster_info *ci;
3526	struct page *head;
3527	struct page *page;
3528	struct page *list_page;
3529	pgoff_t offset;
3530	unsigned char count;
3531	int ret = 0;
3532
3533	/*
3534	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3535	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3536	 */
3537	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3538
3539	si = get_swap_device(entry);
3540	if (!si) {
3541		/*
3542		 * An acceptable race has occurred since the failing
3543		 * __swap_duplicate(): the swap device may have been swapped off.
3544		 */
3545		goto outer;
3546	}
3547	spin_lock(&si->lock);
3548
3549	offset = swp_offset(entry);
3550
3551	ci = lock_cluster(si, offset);
3552
3553	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
3554
3555	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3556		/*
3557		 * The higher the swap count, the more likely it is that tasks
3558		 * will race to add swap count continuation: we need to avoid
3559		 * over-provisioning.
3560		 */
3561		goto out;
3562	}
3563
3564	if (!page) {
3565		ret = -ENOMEM;
3566		goto out;
3567	}
3568
3569	/*
3570	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3571	 * no architecture is using highmem pages for kernel page tables: so it
3572	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3573	 */
3574	head = vmalloc_to_page(si->swap_map + offset);
3575	offset &= ~PAGE_MASK;
3576
3577	spin_lock(&si->cont_lock);
3578	/*
3579	 * Page allocation does not initialize the page's lru field,
3580	 * but it does always reset its private field.
3581	 */
3582	if (!page_private(head)) {
3583		BUG_ON(count & COUNT_CONTINUED);
3584		INIT_LIST_HEAD(&head->lru);
3585		set_page_private(head, SWP_CONTINUED);
3586		si->flags |= SWP_CONTINUED;
3587	}
3588
3589	list_for_each_entry(list_page, &head->lru, lru) {
3590		unsigned char *map;
3591
3592		/*
3593		 * If the previous map said no continuation, but we've found
3594		 * a continuation page, free our allocation and use this one.
3595		 */
3596		if (!(count & COUNT_CONTINUED))
3597			goto out_unlock_cont;
3598
3599		map = kmap_atomic(list_page) + offset;
3600		count = *map;
3601		kunmap_atomic(map);
3602
3603		/*
3604		 * If this continuation count now has some space in it,
3605		 * free our allocation and use this one.
3606		 */
3607		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3608			goto out_unlock_cont;
3609	}
3610
3611	list_add_tail(&page->lru, &head->lru);
3612	page = NULL;			/* now it's attached, don't free it */
3613out_unlock_cont:
3614	spin_unlock(&si->cont_lock);
3615out:
3616	unlock_cluster(ci);
3617	spin_unlock(&si->lock);
3618	put_swap_device(si);
3619outer:
3620	if (page)
3621		__free_page(page);
3622	return ret;
3623}
3624
3625/*
3626 * swap_count_continued - when the original swap_map count is incremented
3627 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3628 * into, carry if so, or else fail until a new continuation page is allocated;
3629 * when the original swap_map count is decremented from 0 with continuation,
3630 * borrow from the continuation and report whether it still holds more.
3631 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3632 * lock.
3633 */
3634static bool swap_count_continued(struct swap_info_struct *si,
3635				 pgoff_t offset, unsigned char count)
3636{
3637	struct page *head;
3638	struct page *page;
3639	unsigned char *map;
3640	bool ret;
3641
3642	head = vmalloc_to_page(si->swap_map + offset);
3643	if (page_private(head) != SWP_CONTINUED) {
3644		BUG_ON(count & COUNT_CONTINUED);
3645		return false;		/* need to add count continuation */
3646	}
3647
3648	spin_lock(&si->cont_lock);
3649	offset &= ~PAGE_MASK;
3650	page = list_entry(head->lru.next, struct page, lru);
3651	map = kmap_atomic(page) + offset;
3652
3653	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
3654		goto init_map;		/* jump over SWAP_CONT_MAX checks */
3655
3656	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3657		/*
3658		 * Think of how you add 1 to 999
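		 * (each continuation page holds one higher-order digit of the
		 * count; a full digit wraps to 0 and the carry moves to the
		 * next page in the list)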
3659		 */
3660		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3661			kunmap_atomic(map);
3662			page = list_entry(page->lru.next, struct page, lru);
3663			BUG_ON(page == head);
3664			map = kmap_atomic(page) + offset;
3665		}
3666		if (*map == SWAP_CONT_MAX) {
3667			kunmap_atomic(map);
3668			page = list_entry(page->lru.next, struct page, lru);
3669			if (page == head) {
3670				ret = false;	/* add count continuation */
3671				goto out;
3672			}
3673			map = kmap_atomic(page) + offset;
3674init_map:		*map = 0;		/* we didn't zero the page */
3675		}
3676		*map += 1;
3677		kunmap_atomic(map);
3678		page = list_entry(page->lru.prev, struct page, lru);
3679		while (page != head) {
3680			map = kmap_atomic(page) + offset;
3681			*map = COUNT_CONTINUED;
3682			kunmap_atomic(map);
3683			page = list_entry(page->lru.prev, struct page, lru);
3684		}
3685		ret = true;			/* incremented */
3686
3687	} else {				/* decrementing */
3688		/*
3689		 * Think of how you subtract 1 from 1000
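		 * (borrow from successive continuation pages until a non-zero
		 * digit is found, then refill the lower digits with
		 * SWAP_CONT_MAX on the way back down)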
3690		 */
3691		BUG_ON(count != COUNT_CONTINUED);
3692		while (*map == COUNT_CONTINUED) {
3693			kunmap_atomic(map);
3694			page = list_entry(page->lru.next, struct page, lru);
3695			BUG_ON(page == head);
3696			map = kmap_atomic(page) + offset;
3697		}
3698		BUG_ON(*map == 0);
3699		*map -= 1;
3700		if (*map == 0)
3701			count = 0;
3702		kunmap_atomic(map);
3703		page = list_entry(page->lru.prev, struct page, lru);
3704		while (page != head) {
3705			map = kmap_atomic(page) + offset;
3706			*map = SWAP_CONT_MAX | count;
3707			count = COUNT_CONTINUED;
3708			kunmap_atomic(map);
3709			page = list_entry(page->lru.prev, struct page, lru);
3710		}
3711		ret = count == COUNT_CONTINUED;
3712	}
3713out:
3714	spin_unlock(&si->cont_lock);
3715	return ret;
3716}
3717
3718/*
3719 * free_swap_count_continuations - swapoff free all the continuation pages
3720 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3721 */
3722static void free_swap_count_continuations(struct swap_info_struct *si)
3723{
3724	pgoff_t offset;
3725
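	/*
	 * The swap_map is vmalloc'ed, so walk it one page at a time: each
	 * of its pages may carry its own list of continuation pages.
	 */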
3726	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3727		struct page *head;
3728		head = vmalloc_to_page(si->swap_map + offset);
3729		if (page_private(head)) {
3730			struct page *page, *next;
3731
3732			list_for_each_entry_safe(page, next, &head->lru, lru) {
3733				list_del(&page->lru);
3734				__free_page(page);
3735			}
3736		}
3737	}
3738}
3739
3740#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3741void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
3742				  gfp_t gfp_mask)
3743{
3744	struct swap_info_struct *si, *next;
3745	if (!(gfp_mask & __GFP_IO) || !memcg)
3746		return;
3747
3748	if (!blk_cgroup_congested())
3749		return;
3750
3751	/*
3752	 * We've already scheduled a throttle, avoid taking the global swap
3753	 * lock.
3754	 */
3755	if (current->throttle_queue)
3756		return;
3757
3758	spin_lock(&swap_avail_lock);
3759	plist_for_each_entry_safe(si, next, &swap_avail_heads[node],
3760				  avail_lists[node]) {
3761		if (si->bdev) {
3762			blkcg_schedule_throttle(bdev_get_queue(si->bdev),
3763						true);
3764			break;
3765		}
3766	}
3767	spin_unlock(&swap_avail_lock);
3768}
3769#endif
3770
3771static int __init swapfile_init(void)
3772{
3773	int nid;
3774
3775	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3776					 GFP_KERNEL);
3777	if (!swap_avail_heads) {
3778		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3779		return -ENOMEM;
3780	}
3781
3782	for_each_node(nid)
3783		plist_head_init(&swap_avail_heads[nid]);
3784
3785	return 0;
3786}
3787subsys_initcall(swapfile_init);
 128 * to allow the swap device to optimize its wear-levelling.
 129 */
 130static int discard_swap(struct swap_info_struct *si)
 131{
 132	struct swap_extent *se;
 133	sector_t start_block;
 134	sector_t nr_blocks;
 135	int err = 0;
 136
 137	/* Do not discard the swap header page! */
 138	se = &si->first_swap_extent;
 139	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 140	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 141	if (nr_blocks) {
 142		err = blkdev_issue_discard(si->bdev, start_block,
 143				nr_blocks, GFP_KERNEL, 0);
 144		if (err)
 145			return err;
 146		cond_resched();
 147	}
 148
 149	list_for_each_entry(se, &si->first_swap_extent.list, list) {
 150		start_block = se->start_block << (PAGE_SHIFT - 9);
 151		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 152
 153		err = blkdev_issue_discard(si->bdev, start_block,
 154				nr_blocks, GFP_KERNEL, 0);
 155		if (err)
 156			break;
 157
 158		cond_resched();
 159	}
 160	return err;		/* That will often be -EOPNOTSUPP */
 161}
 162
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 163/*
 164 * swap allocation tell device that a cluster of swap can now be discarded,
 165 * to allow the swap device to optimize its wear-levelling.
 166 */
 167static void discard_swap_cluster(struct swap_info_struct *si,
 168				 pgoff_t start_page, pgoff_t nr_pages)
 169{
 170	struct swap_extent *se = si->curr_swap_extent;
 171	int found_extent = 0;
 172
 173	while (nr_pages) {
 174		if (se->start_page <= start_page &&
 175		    start_page < se->start_page + se->nr_pages) {
 176			pgoff_t offset = start_page - se->start_page;
 177			sector_t start_block = se->start_block + offset;
 178			sector_t nr_blocks = se->nr_pages - offset;
 179
 180			if (nr_blocks > nr_pages)
 181				nr_blocks = nr_pages;
 182			start_page += nr_blocks;
 183			nr_pages -= nr_blocks;
 184
 185			if (!found_extent++)
 186				si->curr_swap_extent = se;
 187
 188			start_block <<= PAGE_SHIFT - 9;
 189			nr_blocks <<= PAGE_SHIFT - 9;
 190			if (blkdev_issue_discard(si->bdev, start_block,
 191				    nr_blocks, GFP_NOIO, 0))
 192				break;
 193		}
 194
 195		se = list_next_entry(se, list);
 196	}
 197}
 198
 
 
 
 
 
 199#define SWAPFILE_CLUSTER	256
 
 
 
 
 
 
 
 200#define LATENCY_LIMIT		256
 201
 202static inline void cluster_set_flag(struct swap_cluster_info *info,
 203	unsigned int flag)
 204{
 205	info->flags = flag;
 206}
 207
 208static inline unsigned int cluster_count(struct swap_cluster_info *info)
 209{
 210	return info->data;
 211}
 212
 213static inline void cluster_set_count(struct swap_cluster_info *info,
 214				     unsigned int c)
 215{
 216	info->data = c;
 217}
 218
 219static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 220					 unsigned int c, unsigned int f)
 221{
 222	info->flags = f;
 223	info->data = c;
 224}
 225
 226static inline unsigned int cluster_next(struct swap_cluster_info *info)
 227{
 228	return info->data;
 229}
 230
 231static inline void cluster_set_next(struct swap_cluster_info *info,
 232				    unsigned int n)
 233{
 234	info->data = n;
 235}
 236
 237static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 238					 unsigned int n, unsigned int f)
 239{
 240	info->flags = f;
 241	info->data = n;
 242}
 243
 244static inline bool cluster_is_free(struct swap_cluster_info *info)
 245{
 246	return info->flags & CLUSTER_FLAG_FREE;
 247}
 248
 249static inline bool cluster_is_null(struct swap_cluster_info *info)
 250{
 251	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 252}
 253
 254static inline void cluster_set_null(struct swap_cluster_info *info)
 255{
 256	info->flags = CLUSTER_FLAG_NEXT_NULL;
 257	info->data = 0;
 258}
 259
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 260static inline bool cluster_list_empty(struct swap_cluster_list *list)
 261{
 262	return cluster_is_null(&list->head);
 263}
 264
 265static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 266{
 267	return cluster_next(&list->head);
 268}
 269
 270static void cluster_list_init(struct swap_cluster_list *list)
 271{
 272	cluster_set_null(&list->head);
 273	cluster_set_null(&list->tail);
 274}
 275
 276static void cluster_list_add_tail(struct swap_cluster_list *list,
 277				  struct swap_cluster_info *ci,
 278				  unsigned int idx)
 279{
 280	if (cluster_list_empty(list)) {
 281		cluster_set_next_flag(&list->head, idx, 0);
 282		cluster_set_next_flag(&list->tail, idx, 0);
 283	} else {
 
 284		unsigned int tail = cluster_next(&list->tail);
 285
 286		cluster_set_next(&ci[tail], idx);
 
 
 
 
 
 
 
 287		cluster_set_next_flag(&list->tail, idx, 0);
 288	}
 289}
 290
 291static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 292					   struct swap_cluster_info *ci)
 293{
 294	unsigned int idx;
 295
 296	idx = cluster_next(&list->head);
 297	if (cluster_next(&list->tail) == idx) {
 298		cluster_set_null(&list->head);
 299		cluster_set_null(&list->tail);
 300	} else
 301		cluster_set_next_flag(&list->head,
 302				      cluster_next(&ci[idx]), 0);
 303
 304	return idx;
 305}
 306
 307/* Add a cluster to discard list and schedule it to do discard */
 308static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 309		unsigned int idx)
 310{
 311	/*
 312	 * If scan_swap_map() can't find a free cluster, it will check
 313	 * si->swap_map directly. To make sure the discarding cluster isn't
 314	 * taken by scan_swap_map(), mark the swap entries bad (occupied). It
 315	 * will be cleared after discard
 316	 */
 317	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 318			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 319
 320	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 321
 322	schedule_work(&si->discard_work);
 323}
 324
 
 
 
 
 
 
 
 
 325/*
 326 * Doing discard actually. After a cluster discard is finished, the cluster
 327 * will be added to free cluster list. caller should hold si->lock.
 328*/
 329static void swap_do_scheduled_discard(struct swap_info_struct *si)
 330{
 331	struct swap_cluster_info *info;
 332	unsigned int idx;
 333
 334	info = si->cluster_info;
 335
 336	while (!cluster_list_empty(&si->discard_clusters)) {
 337		idx = cluster_list_del_first(&si->discard_clusters, info);
 338		spin_unlock(&si->lock);
 339
 340		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 341				SWAPFILE_CLUSTER);
 342
 343		spin_lock(&si->lock);
 344		cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
 345		cluster_list_add_tail(&si->free_clusters, info, idx);
 346		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 347				0, SWAPFILE_CLUSTER);
 
 348	}
 349}
 350
 351static void swap_discard_work(struct work_struct *work)
 352{
 353	struct swap_info_struct *si;
 354
 355	si = container_of(work, struct swap_info_struct, discard_work);
 356
 357	spin_lock(&si->lock);
 358	swap_do_scheduled_discard(si);
 359	spin_unlock(&si->lock);
 360}
 361
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 362/*
 363 * The cluster corresponding to page_nr will be used. The cluster will be
 364 * removed from free cluster list and its usage counter will be increased.
 365 */
 366static void inc_cluster_info_page(struct swap_info_struct *p,
 367	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 368{
 369	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 370
 371	if (!cluster_info)
 372		return;
 373	if (cluster_is_free(&cluster_info[idx])) {
 374		VM_BUG_ON(cluster_list_first(&p->free_clusters) != idx);
 375		cluster_list_del_first(&p->free_clusters, cluster_info);
 376		cluster_set_count_flag(&cluster_info[idx], 0, 0);
 377	}
 378
 379	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 380	cluster_set_count(&cluster_info[idx],
 381		cluster_count(&cluster_info[idx]) + 1);
 382}
 383
 384/*
 385 * The cluster corresponding to page_nr decreases one usage. If the usage
 386 * counter becomes 0, which means no page in the cluster is in using, we can
 387 * optionally discard the cluster and add it to free cluster list.
 388 */
 389static void dec_cluster_info_page(struct swap_info_struct *p,
 390	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 391{
 392	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 393
 394	if (!cluster_info)
 395		return;
 396
 397	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 398	cluster_set_count(&cluster_info[idx],
 399		cluster_count(&cluster_info[idx]) - 1);
 400
 401	if (cluster_count(&cluster_info[idx]) == 0) {
 402		/*
 403		 * If the swap is discardable, prepare discard the cluster
 404		 * instead of free it immediately. The cluster will be freed
 405		 * after discard.
 406		 */
 407		if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 408				 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 409			swap_cluster_schedule_discard(p, idx);
 410			return;
 411		}
 412
 413		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
 414		cluster_list_add_tail(&p->free_clusters, cluster_info, idx);
 415	}
 416}
 417
 418/*
 419 * It's possible scan_swap_map() uses a free cluster in the middle of free
 420 * cluster list. Avoiding such abuse to avoid list corruption.
 421 */
 422static bool
 423scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 424	unsigned long offset)
 425{
 426	struct percpu_cluster *percpu_cluster;
 427	bool conflict;
 428
 429	offset /= SWAPFILE_CLUSTER;
 430	conflict = !cluster_list_empty(&si->free_clusters) &&
 431		offset != cluster_list_first(&si->free_clusters) &&
 432		cluster_is_free(&si->cluster_info[offset]);
 433
 434	if (!conflict)
 435		return false;
 436
 437	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 438	cluster_set_null(&percpu_cluster->index);
 439	return true;
 440}
 441
 442/*
 443 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 444 * might involve allocating a new cluster for current CPU too.
 445 */
 446static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 447	unsigned long *offset, unsigned long *scan_base)
 448{
 449	struct percpu_cluster *cluster;
 
 450	bool found_free;
 451	unsigned long tmp;
 452
 453new_cluster:
 454	cluster = this_cpu_ptr(si->percpu_cluster);
 455	if (cluster_is_null(&cluster->index)) {
 456		if (!cluster_list_empty(&si->free_clusters)) {
 457			cluster->index = si->free_clusters.head;
 458			cluster->next = cluster_next(&cluster->index) *
 459					SWAPFILE_CLUSTER;
 460		} else if (!cluster_list_empty(&si->discard_clusters)) {
 461			/*
 462			 * we don't have free cluster but have some clusters in
 463			 * discarding, do discard now and reclaim them
 464			 */
 465			swap_do_scheduled_discard(si);
 466			*scan_base = *offset = si->cluster_next;
 467			goto new_cluster;
 468		} else
 469			return;
 470	}
 471
 472	found_free = false;
 473
 474	/*
 475	 * Other CPUs can use our cluster if they can't find a free cluster,
 476	 * check if there is still free entry in the cluster
 477	 */
 478	tmp = cluster->next;
 479	while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
 480	       SWAPFILE_CLUSTER) {
 
 
 
 
 
 
 481		if (!si->swap_map[tmp]) {
 482			found_free = true;
 483			break;
 484		}
 485		tmp++;
 486	}
 
 487	if (!found_free) {
 488		cluster_set_null(&cluster->index);
 489		goto new_cluster;
 490	}
 491	cluster->next = tmp + 1;
 492	*offset = tmp;
 493	*scan_base = tmp;
 
 494}
 495
 496static unsigned long scan_swap_map(struct swap_info_struct *si,
 497				   unsigned char usage)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 498{
 
 499	unsigned long offset;
 500	unsigned long scan_base;
 501	unsigned long last_in_cluster = 0;
 502	int latency_ration = LATENCY_LIMIT;
 
 
 
 
 503
 504	/*
 505	 * We try to cluster swap pages by allocating them sequentially
 506	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 507	 * way, however, we resort to first-free allocation, starting
 508	 * a new cluster.  This prevents us from scattering swap pages
 509	 * all over the entire swap partition, so that we reduce
 510	 * overall disk seek times between swap pages.  -- sct
 511	 * But we do now try to find an empty cluster.  -Andrea
 512	 * And we let swap pages go all over an SSD partition.  Hugh
 513	 */
 514
 515	si->flags += SWP_SCANNING;
 516	scan_base = offset = si->cluster_next;
 517
 518	/* SSD algorithm */
 519	if (si->cluster_info) {
 520		scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
 521		goto checks;
 
 
 522	}
 523
 524	if (unlikely(!si->cluster_nr--)) {
 525		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 526			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 527			goto checks;
 528		}
 529
 530		spin_unlock(&si->lock);
 531
 532		/*
 533		 * If seek is expensive, start searching for new cluster from
 534		 * start of partition, to minimize the span of allocated swap.
 535		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 536		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 537		 */
 538		scan_base = offset = si->lowest_bit;
 539		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 540
 541		/* Locate the first empty (unaligned) cluster */
 542		for (; last_in_cluster <= si->highest_bit; offset++) {
 543			if (si->swap_map[offset])
 544				last_in_cluster = offset + SWAPFILE_CLUSTER;
 545			else if (offset == last_in_cluster) {
 546				spin_lock(&si->lock);
 547				offset -= SWAPFILE_CLUSTER - 1;
 548				si->cluster_next = offset;
 549				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 550				goto checks;
 551			}
 552			if (unlikely(--latency_ration < 0)) {
 553				cond_resched();
 554				latency_ration = LATENCY_LIMIT;
 555			}
 556		}
 557
 558		offset = scan_base;
 559		spin_lock(&si->lock);
 560		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 561	}
 562
 563checks:
 564	if (si->cluster_info) {
 565		while (scan_swap_map_ssd_cluster_conflict(si, offset))
 566			scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
 
 
 
 
 
 
 567	}
 568	if (!(si->flags & SWP_WRITEOK))
 569		goto no_page;
 570	if (!si->highest_bit)
 571		goto no_page;
 572	if (offset > si->highest_bit)
 573		scan_base = offset = si->lowest_bit;
 574
 
 575	/* reuse swap entry of cache-only swap if not busy. */
 576	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 577		int swap_was_freed;
 
 578		spin_unlock(&si->lock);
 579		swap_was_freed = __try_to_reclaim_swap(si, offset);
 580		spin_lock(&si->lock);
 581		/* entry was freed successfully, try to use this again */
 582		if (swap_was_freed)
 583			goto checks;
 584		goto scan; /* check next one */
 585	}
 586
 587	if (si->swap_map[offset])
 588		goto scan;
 589
 590	if (offset == si->lowest_bit)
 591		si->lowest_bit++;
 592	if (offset == si->highest_bit)
 593		si->highest_bit--;
 594	si->inuse_pages++;
 595	if (si->inuse_pages == si->pages) {
 596		si->lowest_bit = si->max;
 597		si->highest_bit = 0;
 598		spin_lock(&swap_avail_lock);
 599		plist_del(&si->avail_list, &swap_avail_head);
 600		spin_unlock(&swap_avail_lock);
 601	}
 602	si->swap_map[offset] = usage;
 603	inc_cluster_info_page(si, si->cluster_info, offset);
 
 
 
 604	si->cluster_next = offset + 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 605	si->flags -= SWP_SCANNING;
 606
 607	return offset;
 608
 609scan:
 610	spin_unlock(&si->lock);
 611	while (++offset <= si->highest_bit) {
 612		if (!si->swap_map[offset]) {
 613			spin_lock(&si->lock);
 614			goto checks;
 615		}
 616		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 617			spin_lock(&si->lock);
 618			goto checks;
 619		}
 620		if (unlikely(--latency_ration < 0)) {
 621			cond_resched();
 622			latency_ration = LATENCY_LIMIT;
 623		}
 624	}
 625	offset = si->lowest_bit;
 626	while (offset < scan_base) {
 627		if (!si->swap_map[offset]) {
 628			spin_lock(&si->lock);
 629			goto checks;
 630		}
 631		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 632			spin_lock(&si->lock);
 633			goto checks;
 634		}
 635		if (unlikely(--latency_ration < 0)) {
 636			cond_resched();
 637			latency_ration = LATENCY_LIMIT;
 638		}
 639		offset++;
 640	}
 641	spin_lock(&si->lock);
 642
 643no_page:
 644	si->flags -= SWP_SCANNING;
 645	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 646}
 647
 648swp_entry_t get_swap_page(void)
 649{
 
 650	struct swap_info_struct *si, *next;
 651	pgoff_t offset;
 
 
 
 
 
 652
 653	if (atomic_long_read(&nr_swap_pages) <= 0)
 
 654		goto noswap;
 655	atomic_long_dec(&nr_swap_pages);
 
 
 
 
 
 
 
 656
 657	spin_lock(&swap_avail_lock);
 658
 659start_over:
 660	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
 
 661		/* requeue si to after same-priority siblings */
 662		plist_requeue(&si->avail_list, &swap_avail_head);
 663		spin_unlock(&swap_avail_lock);
 664		spin_lock(&si->lock);
 665		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
 666			spin_lock(&swap_avail_lock);
 667			if (plist_node_empty(&si->avail_list)) {
 668				spin_unlock(&si->lock);
 669				goto nextsi;
 670			}
 671			WARN(!si->highest_bit,
 672			     "swap_info %d in list but !highest_bit\n",
 673			     si->type);
 674			WARN(!(si->flags & SWP_WRITEOK),
 675			     "swap_info %d in list but !SWP_WRITEOK\n",
 676			     si->type);
 677			plist_del(&si->avail_list, &swap_avail_head);
 678			spin_unlock(&si->lock);
 679			goto nextsi;
 680		}
 681
 682		/* This is called for allocating swap entry for cache */
 683		offset = scan_swap_map(si, SWAP_HAS_CACHE);
 
 
 
 684		spin_unlock(&si->lock);
 685		if (offset)
 686			return swp_entry(si->type, offset);
 687		pr_debug("scan_swap_map of si %d failed to find offset\n",
 688		       si->type);
 
 689		spin_lock(&swap_avail_lock);
 690nextsi:
 691		/*
 692		 * if we got here, it's likely that si was almost full before,
 693		 * and since scan_swap_map() can drop the si->lock, multiple
 694		 * callers probably all tried to get a page from the same si
 695		 * and it filled up before we could get one; or, the si filled
 696		 * up between us dropping swap_avail_lock and taking si->lock.
 697		 * Since we dropped the swap_avail_lock, the swap_avail_head
 698		 * list may have been modified; so if next is still in the
 699		 * swap_avail_head list then try it, otherwise start over.
 
 700		 */
 701		if (plist_node_empty(&next->avail_list))
 702			goto start_over;
 703	}
 704
 705	spin_unlock(&swap_avail_lock);
 706
 707	atomic_long_inc(&nr_swap_pages);
 
 
 
 708noswap:
 709	return (swp_entry_t) {0};
 710}
 711
 712/* The only caller of this function is now suspend routine */
 713swp_entry_t get_swap_page_of_type(int type)
 714{
 715	struct swap_info_struct *si;
 716	pgoff_t offset;
 717
 718	si = swap_info[type];
 
 
 719	spin_lock(&si->lock);
 720	if (si && (si->flags & SWP_WRITEOK)) {
 721		atomic_long_dec(&nr_swap_pages);
 722		/* This is called for allocating swap entry, not cache */
 723		offset = scan_swap_map(si, 1);
 724		if (offset) {
 725			spin_unlock(&si->lock);
 726			return swp_entry(type, offset);
 727		}
 728		atomic_long_inc(&nr_swap_pages);
 729	}
 730	spin_unlock(&si->lock);
 
 731	return (swp_entry_t) {0};
 732}
 733
 734static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 735{
 736	struct swap_info_struct *p;
 737	unsigned long offset, type;
 738
 739	if (!entry.val)
 740		goto out;
 741	type = swp_type(entry);
 742	if (type >= nr_swapfiles)
 743		goto bad_nofile;
 744	p = swap_info[type];
 745	if (!(p->flags & SWP_USED))
 746		goto bad_device;
 747	offset = swp_offset(entry);
 748	if (offset >= p->max)
 749		goto bad_offset;
 750	if (!p->swap_map[offset])
 751		goto bad_free;
 752	spin_lock(&p->lock);
 753	return p;
 754
 755bad_free:
 756	pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
 757	goto out;
 758bad_offset:
 759	pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
 760	goto out;
 761bad_device:
 762	pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
 763	goto out;
 764bad_nofile:
 765	pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 766out:
 767	return NULL;
 768}
 769
 770static unsigned char swap_entry_free(struct swap_info_struct *p,
 771				     swp_entry_t entry, unsigned char usage)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 772{
 773	unsigned long offset = swp_offset(entry);
 774	unsigned char count;
 775	unsigned char has_cache;
 776
 777	count = p->swap_map[offset];
 
 778	has_cache = count & SWAP_HAS_CACHE;
 779	count &= ~SWAP_HAS_CACHE;
 780
 781	if (usage == SWAP_HAS_CACHE) {
 782		VM_BUG_ON(!has_cache);
 783		has_cache = 0;
 784	} else if (count == SWAP_MAP_SHMEM) {
 785		/*
 786		 * Or we could insist on shmem.c using a special
 787		 * swap_shmem_free() and free_shmem_swap_and_cache()...
 788		 */
 789		count = 0;
 790	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
 791		if (count == COUNT_CONTINUED) {
 792			if (swap_count_continued(p, offset, count))
 793				count = SWAP_MAP_MAX | COUNT_CONTINUED;
 794			else
 795				count = SWAP_MAP_MAX;
 796		} else
 797			count--;
 798	}
 799
 800	usage = count | has_cache;
 801	p->swap_map[offset] = usage;
 
 
 
 802
 803	/* free if no reference */
 804	if (!usage) {
 805		mem_cgroup_uncharge_swap(entry);
 806		dec_cluster_info_page(p, p->cluster_info, offset);
 807		if (offset < p->lowest_bit)
 808			p->lowest_bit = offset;
 809		if (offset > p->highest_bit) {
 810			bool was_full = !p->highest_bit;
 811			p->highest_bit = offset;
 812			if (was_full && (p->flags & SWP_WRITEOK)) {
 813				spin_lock(&swap_avail_lock);
 814				WARN_ON(!plist_node_empty(&p->avail_list));
 815				if (plist_node_empty(&p->avail_list))
 816					plist_add(&p->avail_list,
 817						  &swap_avail_head);
 818				spin_unlock(&swap_avail_lock);
 819			}
 820		}
 821		atomic_long_inc(&nr_swap_pages);
 822		p->inuse_pages--;
 823		frontswap_invalidate_page(p->type, offset);
 824		if (p->flags & SWP_BLKDEV) {
 825			struct gendisk *disk = p->bdev->bd_disk;
 826			if (disk->fops->swap_slot_free_notify)
 827				disk->fops->swap_slot_free_notify(p->bdev,
 828								  offset);
 829		}
 830	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 831
 832	return usage;
 833}
 834
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 835/*
 836 * Caller has made sure that the swap device corresponding to entry
 837 * is still around or has not been recycled.
 838 */
 839void swap_free(swp_entry_t entry)
 840{
 841	struct swap_info_struct *p;
 842
 843	p = swap_info_get(entry);
 844	if (p) {
 845		swap_entry_free(p, entry, 1);
 846		spin_unlock(&p->lock);
 847	}
 848}
 849
 850/*
 851 * Called after dropping swapcache to decrease refcnt to swap entries.
 852 */
 853void swapcache_free(swp_entry_t entry)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 854{
 855	struct swap_info_struct *p;
 
 
 
 
 
 
 
 856
 857	p = swap_info_get(entry);
 858	if (p) {
 859		swap_entry_free(p, entry, SWAP_HAS_CACHE);
 
 
 
 
 
 
 
 
 
 
 
 860		spin_unlock(&p->lock);
 861	}
 862}
 863
 864/*
 865 * How many references to page are currently swapped out?
 866 * This does not give an exact answer when swap count is continued,
 867 * but does include the high COUNT_CONTINUED flag to allow for that.
 868 */
 869int page_swapcount(struct page *page)
 870{
 871	int count = 0;
 872	struct swap_info_struct *p;
 
 873	swp_entry_t entry;
 
 874
 875	entry.val = page_private(page);
 876	p = swap_info_get(entry);
 877	if (p) {
 878		count = swap_count(p->swap_map[swp_offset(entry)]);
 879		spin_unlock(&p->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 880	}
 881	return count;
 882}
 883
 884/*
 885 * How many references to @entry are currently swapped out?
 886 * This considers COUNT_CONTINUED so it returns exact answer.
 887 */
 888int swp_swapcount(swp_entry_t entry)
 889{
 890	int count, tmp_count, n;
 891	struct swap_info_struct *p;
 
 892	struct page *page;
 893	pgoff_t offset;
 894	unsigned char *map;
 895
 896	p = swap_info_get(entry);
 897	if (!p)
 898		return 0;
 899
 900	count = swap_count(p->swap_map[swp_offset(entry)]);
 
 
 
 
 901	if (!(count & COUNT_CONTINUED))
 902		goto out;
 903
 904	count &= ~COUNT_CONTINUED;
 905	n = SWAP_MAP_MAX + 1;
 906
 907	offset = swp_offset(entry);
 908	page = vmalloc_to_page(p->swap_map + offset);
 909	offset &= ~PAGE_MASK;
 910	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
 911
 912	do {
 913		page = list_next_entry(page, lru);
 914		map = kmap_atomic(page);
 915		tmp_count = map[offset];
 916		kunmap_atomic(map);
 917
 918		count += (tmp_count & ~COUNT_CONTINUED) * n;
 919		n *= (SWAP_CONT_MAX + 1);
 920	} while (tmp_count & COUNT_CONTINUED);
 921out:
 922	spin_unlock(&p->lock);
 923	return count;
 924}
 925
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 926/*
 927 * We can write to an anon page without COW if there are no other references
 928 * to it.  And as a side-effect, free up its swap: because the old content
 929 * on disk will never be read, and seeking back there to write new content
 930 * later would only waste time away from clustering.
 931 *
 932 * NOTE: total_mapcount should not be relied upon by the caller if
 933 * reuse_swap_page() returns false, but it may be always overwritten
 934 * (see the other implementation for CONFIG_SWAP=n).
 935 */
 936bool reuse_swap_page(struct page *page, int *total_mapcount)
 937{
 938	int count;
 939
 940	VM_BUG_ON_PAGE(!PageLocked(page), page);
 941	if (unlikely(PageKsm(page)))
 942		return false;
 943	count = page_trans_huge_mapcount(page, total_mapcount);
 944	if (count <= 1 && PageSwapCache(page)) {
 945		count += page_swapcount(page);
 946		if (count != 1)
 947			goto out;
 
 
 
 948		if (!PageWriteback(page)) {
 
 949			delete_from_swap_cache(page);
 950			SetPageDirty(page);
 951		} else {
 952			swp_entry_t entry;
 953			struct swap_info_struct *p;
 954
 955			entry.val = page_private(page);
 956			p = swap_info_get(entry);
 957			if (p->flags & SWP_STABLE_WRITES) {
 958				spin_unlock(&p->lock);
 959				return false;
 960			}
 961			spin_unlock(&p->lock);
 962		}
 963	}
 964out:
 965	return count <= 1;
 966}
 967
 968/*
 969 * If swap is getting full, or if there are no more mappings of this page,
 970 * then try_to_free_swap is called to free its swap space.
 971 */
 972int try_to_free_swap(struct page *page)
 973{
 974	VM_BUG_ON_PAGE(!PageLocked(page), page);
 975
 976	if (!PageSwapCache(page))
 977		return 0;
 978	if (PageWriteback(page))
 979		return 0;
 980	if (page_swapcount(page))
 981		return 0;
 982
 983	/*
 984	 * Once hibernation has begun to create its image of memory,
 985	 * there's a danger that one of the calls to try_to_free_swap()
 986	 * - most probably a call from __try_to_reclaim_swap() while
 987	 * hibernation is allocating its own swap pages for the image,
 988	 * but conceivably even a call from memory reclaim - will free
 989	 * the swap from a page which has already been recorded in the
 990	 * image as a clean swapcache page, and then reuse its swap for
 991	 * another page of the image.  On waking from hibernation, the
 992	 * original page might be freed under memory pressure, then
 993	 * later read back in from swap, now with the wrong data.
 994	 *
 995	 * Hibernation suspends storage while it is writing the image
 996	 * to disk so check that here.
 997	 */
 998	if (pm_suspended_storage())
 999		return 0;
1000
 
1001	delete_from_swap_cache(page);
1002	SetPageDirty(page);
1003	return 1;
1004}
1005
1006/*
1007 * Free the swap entry like above, but also try to
1008 * free the page cache entry if it is the last user.
1009 */
1010int free_swap_and_cache(swp_entry_t entry)
1011{
1012	struct swap_info_struct *p;
1013	struct page *page = NULL;
1014
1015	if (non_swap_entry(entry))
1016		return 1;
1017
1018	p = swap_info_get(entry);
1019	if (p) {
1020		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
1021			page = find_get_page(swap_address_space(entry),
1022					     swp_offset(entry));
1023			if (page && !trylock_page(page)) {
1024				put_page(page);
1025				page = NULL;
1026			}
1027		}
1028		spin_unlock(&p->lock);
1029	}
1030	if (page) {
1031		/*
1032		 * Not mapped elsewhere, or swap space full? Free it!
1033		 * Also recheck PageSwapCache now page is locked (above).
1034		 */
1035		if (PageSwapCache(page) && !PageWriteback(page) &&
1036		    (!page_mapped(page) || mem_cgroup_swap_full(page))) {
1037			delete_from_swap_cache(page);
1038			SetPageDirty(page);
1039		}
1040		unlock_page(page);
1041		put_page(page);
1042	}
1043	return p != NULL;
1044}
1045
1046#ifdef CONFIG_HIBERNATION
1047/*
1048 * Find the swap type that corresponds to given device (if any).
1049 *
1050 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1051 * from 0, in which the swap header is expected to be located.
1052 *
1053 * This is needed for the suspend to disk (aka swsusp).
1054 */
1055int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
1056{
1057	struct block_device *bdev = NULL;
1058	int type;
1059
1060	if (device)
1061		bdev = bdget(device);
1062
1063	spin_lock(&swap_lock);
1064	for (type = 0; type < nr_swapfiles; type++) {
1065		struct swap_info_struct *sis = swap_info[type];
1066
1067		if (!(sis->flags & SWP_WRITEOK))
1068			continue;
1069
1070		if (!bdev) {
1071			if (bdev_p)
1072				*bdev_p = bdgrab(sis->bdev);
1073
1074			spin_unlock(&swap_lock);
1075			return type;
1076		}
1077		if (bdev == sis->bdev) {
1078			struct swap_extent *se = &sis->first_swap_extent;
1079
1080			if (se->start_block == offset) {
1081				if (bdev_p)
1082					*bdev_p = bdgrab(sis->bdev);
1083
1084				spin_unlock(&swap_lock);
1085				bdput(bdev);
1086				return type;
1087			}
1088		}
1089	}
1090	spin_unlock(&swap_lock);
1091	if (bdev)
1092		bdput(bdev);
1093
1094	return -ENODEV;
1095}
1096
1097/*
1098 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1099 * corresponding to given index in swap_info (swap type).
1100 */
1101sector_t swapdev_block(int type, pgoff_t offset)
1102{
1103	struct block_device *bdev;
 
1104
1105	if ((unsigned int)type >= nr_swapfiles)
1106		return 0;
1107	if (!(swap_info[type]->flags & SWP_WRITEOK))
1108		return 0;
1109	return map_swap_entry(swp_entry(type, offset), &bdev);
1110}
1111
1112/*
1113 * Return either the total number of swap pages of given type, or the number
1114 * of free pages of that type (depending on @free)
1115 *
1116 * This is needed for software suspend
1117 */
1118unsigned int count_swap_pages(int type, int free)
1119{
1120	unsigned int n = 0;
1121
1122	spin_lock(&swap_lock);
1123	if ((unsigned int)type < nr_swapfiles) {
1124		struct swap_info_struct *sis = swap_info[type];
1125
1126		spin_lock(&sis->lock);
1127		if (sis->flags & SWP_WRITEOK) {
1128			n = sis->pages;
1129			if (free)
1130				n -= sis->inuse_pages;
1131		}
1132		spin_unlock(&sis->lock);
1133	}
1134	spin_unlock(&swap_lock);
1135	return n;
1136}
1137#endif /* CONFIG_HIBERNATION */
1138
1139static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1140{
1141	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
1142}
1143
1144/*
1145 * No need to decide whether this PTE shares the swap entry with others,
1146 * just let do_wp_page work it out if a write is requested later - to
1147 * force COW, vm_page_prot omits write permission from any private vma.
1148 */
1149static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1150		unsigned long addr, swp_entry_t entry, struct page *page)
1151{
1152	struct page *swapcache;
1153	struct mem_cgroup *memcg;
1154	spinlock_t *ptl;
1155	pte_t *pte;
1156	int ret = 1;
1157
1158	swapcache = page;
1159	page = ksm_might_need_to_copy(page, vma, addr);
1160	if (unlikely(!page))
1161		return -ENOMEM;
1162
1163	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
1164				&memcg, false)) {
1165		ret = -ENOMEM;
1166		goto out_nolock;
1167	}
1168
1169	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1170	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1171		mem_cgroup_cancel_charge(page, memcg, false);
1172		ret = 0;
1173		goto out;
1174	}
1175
1176	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1177	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1178	get_page(page);
1179	set_pte_at(vma->vm_mm, addr, pte,
1180		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1181	if (page == swapcache) {
1182		page_add_anon_rmap(page, vma, addr, false);
1183		mem_cgroup_commit_charge(page, memcg, true, false);
1184	} else { /* ksm created a completely new copy */
1185		page_add_new_anon_rmap(page, vma, addr, false);
1186		mem_cgroup_commit_charge(page, memcg, false, false);
1187		lru_cache_add_active_or_unevictable(page, vma);
1188	}
1189	swap_free(entry);
1190	/*
1191	 * Move the page to the active list so it is not
1192	 * immediately swapped out again after swapon.
1193	 */
1194	activate_page(page);
1195out:
1196	pte_unmap_unlock(pte, ptl);
1197out_nolock:
1198	if (page != swapcache) {
1199		unlock_page(page);
1200		put_page(page);
1201	}
1202	return ret;
1203}
1204
1205static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1206				unsigned long addr, unsigned long end,
1207				swp_entry_t entry, struct page *page)
 
1208{
1209	pte_t swp_pte = swp_entry_to_pte(entry);
 
1210	pte_t *pte;
 
 
1211	int ret = 0;
 
1212
1213	/*
1214	 * We don't actually need pte lock while scanning for swp_pte: since
1215	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
1216	 * page table while we're scanning; though it could get zapped, and on
1217	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
1218	 * of unmatched parts which look like swp_pte, so unuse_pte must
1219	 * recheck under pte lock.  Scanning without pte lock lets it be
1220	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
1221	 */
1222	pte = pte_offset_map(pmd, addr);
1223	do {
1224		/*
1225		 * swapoff spends a _lot_ of time in this loop!
1226		 * Test inline before going to call unuse_pte.
1227		 */
1228		if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
1229			pte_unmap(pte);
1230			ret = unuse_pte(vma, pmd, addr, entry, page);
1231			if (ret)
1232				goto out;
1233			pte = pte_offset_map(pmd, addr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1234		}
 
 
1235	} while (pte++, addr += PAGE_SIZE, addr != end);
1236	pte_unmap(pte - 1);
 
 
1237out:
1238	return ret;
1239}
1240
1241static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1242				unsigned long addr, unsigned long end,
1243				swp_entry_t entry, struct page *page)
 
1244{
1245	pmd_t *pmd;
1246	unsigned long next;
1247	int ret;
1248
1249	pmd = pmd_offset(pud, addr);
1250	do {
1251		cond_resched();
1252		next = pmd_addr_end(addr, end);
1253		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1254			continue;
1255		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
 
1256		if (ret)
1257			return ret;
1258	} while (pmd++, addr = next, addr != end);
1259	return 0;
1260}
1261
1262static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
1263				unsigned long addr, unsigned long end,
1264				swp_entry_t entry, struct page *page)
 
1265{
1266	pud_t *pud;
1267	unsigned long next;
1268	int ret;
1269
1270	pud = pud_offset(pgd, addr);
1271	do {
1272		next = pud_addr_end(addr, end);
1273		if (pud_none_or_clear_bad(pud))
1274			continue;
1275		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
 
1276		if (ret)
1277			return ret;
1278	} while (pud++, addr = next, addr != end);
1279	return 0;
1280}
1281
1282static int unuse_vma(struct vm_area_struct *vma,
1283				swp_entry_t entry, struct page *page)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1284{
1285	pgd_t *pgd;
1286	unsigned long addr, end, next;
1287	int ret;
1288
1289	if (page_anon_vma(page)) {
1290		addr = page_address_in_vma(page, vma);
1291		if (addr == -EFAULT)
1292			return 0;
1293		else
1294			end = addr + PAGE_SIZE;
1295	} else {
1296		addr = vma->vm_start;
1297		end = vma->vm_end;
1298	}
1299
1300	pgd = pgd_offset(vma->vm_mm, addr);
1301	do {
1302		next = pgd_addr_end(addr, end);
1303		if (pgd_none_or_clear_bad(pgd))
1304			continue;
1305		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
 
1306		if (ret)
1307			return ret;
1308	} while (pgd++, addr = next, addr != end);
1309	return 0;
1310}
1311
1312static int unuse_mm(struct mm_struct *mm,
1313				swp_entry_t entry, struct page *page)
1314{
1315	struct vm_area_struct *vma;
1316	int ret = 0;
1317
1318	if (!down_read_trylock(&mm->mmap_sem)) {
1319		/*
1320		 * Activate page so shrink_inactive_list is unlikely to unmap
1321		 * its ptes while lock is dropped, so swapoff can make progress.
1322		 */
1323		activate_page(page);
1324		unlock_page(page);
1325		down_read(&mm->mmap_sem);
1326		lock_page(page);
1327	}
1328	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1329		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1330			break;
 
 
 
 
1331		cond_resched();
1332	}
1333	up_read(&mm->mmap_sem);
1334	return (ret < 0)? ret: 0;
1335}
1336
1337/*
1338 * Scan swap_map (or frontswap_map if frontswap parameter is true)
1339 * from current position to next entry still in use.
1340 * Recycle to start on reaching the end, returning 0 when empty.
1341 */
1342static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1343					unsigned int prev, bool frontswap)
1344{
1345	unsigned int max = si->max;
1346	unsigned int i = prev;
1347	unsigned char count;
1348
1349	/*
1350	 * No need for swap_lock here: we're just looking
1351	 * for whether an entry is in use, not modifying it; false
1352	 * hits are okay, and sys_swapoff() has already prevented new
1353	 * allocations from this area (while holding swap_lock).
1354	 */
1355	for (;;) {
1356		if (++i >= max) {
1357			if (!prev) {
1358				i = 0;
1359				break;
1360			}
1361			/*
1362			 * No entries in use at top of swap_map,
1363			 * loop back to start and recheck there.
1364			 */
1365			max = prev + 1;
1366			prev = 0;
1367			i = 1;
1368		}
1369		count = READ_ONCE(si->swap_map[i]);
1370		if (count && swap_count(count) != SWAP_MAP_BAD)
1371			if (!frontswap || frontswap_test(si, i))
1372				break;
1373		if ((i % LATENCY_LIMIT) == 0)
1374			cond_resched();
1375	}
 
 
 
 
1376	return i;
1377}
1378
1379/*
1380 * We completely avoid races by reading each swap page in advance,
1381 * and then search for the process using it.  All the necessary
1382 * page table adjustments can then be made atomically.
1383 *
1384 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1385 * pages_to_unuse==0 means all pages; ignored if frontswap is false
1386 */
1387int try_to_unuse(unsigned int type, bool frontswap,
1388		 unsigned long pages_to_unuse)
1389{
 
 
 
 
1390	struct swap_info_struct *si = swap_info[type];
1391	struct mm_struct *start_mm;
1392	volatile unsigned char *swap_map; /* swap_map is accessed without
1393					   * locking. Mark it as volatile
1394					   * to prevent compiler doing
1395					   * something odd.
1396					   */
1397	unsigned char swcount;
1398	struct page *page;
1399	swp_entry_t entry;
1400	unsigned int i = 0;
1401	int retval = 0;
 
 
1402
1403	/*
1404	 * When searching mms for an entry, a good strategy is to
1405	 * start at the first mm we freed the previous entry from
1406	 * (though actually we don't notice whether we or coincidence
1407	 * freed the entry).  Initialize this start_mm with a hold.
1408	 *
1409	 * A simpler strategy would be to start at the last mm we
1410	 * freed the previous entry from; but that would take less
1411	 * advantage of mmlist ordering, which clusters forked mms
1412	 * together, child after parent.  If we race with dup_mmap(), we
1413	 * prefer to resolve parent before child, lest we miss entries
1414	 * duplicated after we scanned child: using last mm would invert
1415	 * that.
1416	 */
1417	start_mm = &init_mm;
1418	atomic_inc(&init_mm.mm_users);
1419
1420	/*
1421	 * Keep on scanning until all entries have gone.  Usually,
1422	 * one pass through swap_map is enough, but not necessarily:
1423	 * there are races when an instance of an entry might be missed.
1424	 */
1425	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1426		if (signal_pending(current)) {
1427			retval = -EINTR;
1428			break;
1429		}
1430
1431		/*
1432		 * Get a page for the entry, using the existing swap
1433		 * cache page if there is one.  Otherwise, get a clean
1434		 * page and read the swap into it.
1435		 */
1436		swap_map = &si->swap_map[i];
1437		entry = swp_entry(type, i);
1438		page = read_swap_cache_async(entry,
1439					GFP_HIGHUSER_MOVABLE, NULL, 0);
1440		if (!page) {
1441			/*
1442			 * Either swap_duplicate() failed because entry
1443			 * has been freed independently, and will not be
1444			 * reused since sys_swapoff() already disabled
1445			 * allocation from here, or alloc_page() failed.
1446			 */
1447			swcount = *swap_map;
1448			/*
1449			 * We don't hold lock here, so the swap entry could be
1450			 * SWAP_MAP_BAD (when the cluster is discarding).
1451			 * Instead of fail out, We can just skip the swap
1452			 * entry because swapoff will wait for discarding
1453			 * finish anyway.
1454			 */
1455			if (!swcount || swcount == SWAP_MAP_BAD)
1456				continue;
1457			retval = -ENOMEM;
1458			break;
1459		}
1460
1461		/*
1462		 * Don't hold on to start_mm if it looks like exiting.
1463		 */
1464		if (atomic_read(&start_mm->mm_users) == 1) {
1465			mmput(start_mm);
1466			start_mm = &init_mm;
1467			atomic_inc(&init_mm.mm_users);
1468		}
1469
1470		/*
1471		 * Wait for and lock page.  When do_swap_page races with
1472		 * try_to_unuse, do_swap_page can handle the fault much
1473		 * faster than try_to_unuse can locate the entry.  This
1474		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1475		 * defer to do_swap_page in such a case - in some tests,
1476		 * do_swap_page and try_to_unuse repeatedly compete.
1477		 */
1478		wait_on_page_locked(page);
1479		wait_on_page_writeback(page);
1480		lock_page(page);
1481		wait_on_page_writeback(page);
1482
1483		/*
1484		 * Remove all references to entry.
1485		 */
1486		swcount = *swap_map;
1487		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1488			retval = shmem_unuse(entry, page);
1489			/* page has already been unlocked and released */
1490			if (retval < 0)
1491				break;
1492			continue;
1493		}
1494		if (swap_count(swcount) && start_mm != &init_mm)
1495			retval = unuse_mm(start_mm, entry, page);
 
1496
1497		if (swap_count(*swap_map)) {
1498			int set_start_mm = (*swap_map >= swcount);
1499			struct list_head *p = &start_mm->mmlist;
1500			struct mm_struct *new_start_mm = start_mm;
1501			struct mm_struct *prev_mm = start_mm;
1502			struct mm_struct *mm;
1503
1504			atomic_inc(&new_start_mm->mm_users);
1505			atomic_inc(&prev_mm->mm_users);
1506			spin_lock(&mmlist_lock);
1507			while (swap_count(*swap_map) && !retval &&
1508					(p = p->next) != &start_mm->mmlist) {
1509				mm = list_entry(p, struct mm_struct, mmlist);
1510				if (!atomic_inc_not_zero(&mm->mm_users))
1511					continue;
1512				spin_unlock(&mmlist_lock);
1513				mmput(prev_mm);
1514				prev_mm = mm;
1515
1516				cond_resched();
1517
1518				swcount = *swap_map;
1519				if (!swap_count(swcount)) /* any usage ? */
1520					;
1521				else if (mm == &init_mm)
1522					set_start_mm = 1;
1523				else
1524					retval = unuse_mm(mm, entry, page);
1525
1526				if (set_start_mm && *swap_map < swcount) {
1527					mmput(new_start_mm);
1528					atomic_inc(&mm->mm_users);
1529					new_start_mm = mm;
1530					set_start_mm = 0;
1531				}
1532				spin_lock(&mmlist_lock);
1533			}
1534			spin_unlock(&mmlist_lock);
1535			mmput(prev_mm);
1536			mmput(start_mm);
1537			start_mm = new_start_mm;
1538		}
1539		if (retval) {
1540			unlock_page(page);
1541			put_page(page);
1542			break;
1543		}
1544
1545		/*
1546		 * If a reference remains (rare), we would like to leave
1547		 * the page in the swap cache; but try_to_unmap could
1548		 * then re-duplicate the entry once we drop page lock,
1549		 * so we might loop indefinitely; also, that page could
1550		 * not be swapped out to other storage meanwhile.  So:
1551		 * delete from cache even if there's another reference,
1552		 * after ensuring that the data has been saved to disk -
1553		 * since if the reference remains (rarer), it will be
1554		 * read from disk into another page.  Splitting into two
1555		 * pages would be incorrect if swap supported "shared
1556		 * private" pages, but they are handled by tmpfs files.
1557		 *
1558		 * Given how unuse_vma() targets one particular offset
1559		 * in an anon_vma, once the anon_vma has been determined,
1560		 * this splitting happens to be just what is needed to
1561		 * handle where KSM pages have been swapped out: re-reading
1562		 * is unnecessarily slow, but we can fix that later on.
1563		 */
1564		if (swap_count(*swap_map) &&
1565		     PageDirty(page) && PageSwapCache(page)) {
1566			struct writeback_control wbc = {
1567				.sync_mode = WB_SYNC_NONE,
1568			};
1569
1570			swap_writepage(page, &wbc);
1571			lock_page(page);
1572			wait_on_page_writeback(page);
1573		}
 
 
 
 
 
 
1574
1575		/*
1576		 * It is conceivable that a racing task removed this page from
1577		 * swap cache just before we acquired the page lock at the top,
1578		 * or while we dropped it in unuse_mm().  The page might even
1579		 * be back in swap cache on another swap area: that we must not
1580		 * delete, since it may not have been written out to swap yet.
1581		 */
1582		if (PageSwapCache(page) &&
1583		    likely(page_private(page) == entry.val))
1584			delete_from_swap_cache(page);
1585
1586		/*
1587		 * So we could skip searching mms once swap count went
1588		 * to 1, we did not mark any present ptes as dirty: must
1589		 * mark page dirty so shrink_page_list will preserve it.
1590		 */
1591		SetPageDirty(page);
1592		unlock_page(page);
1593		put_page(page);
1594
1595		/*
1596		 * Make sure that we aren't completely killing
1597		 * interactive performance.
 
1598		 */
1599		cond_resched();
1600		if (frontswap && pages_to_unuse > 0) {
1601			if (!--pages_to_unuse)
1602				break;
1603		}
1604	}
1605
1606	mmput(start_mm);
1607	return retval;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1608}
1609
1610/*
1611 * After a successful try_to_unuse, if no swap is now in use, we know
1612 * we can empty the mmlist.  swap_lock must be held on entry and exit.
1613 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1614 * added to the mmlist just after page_duplicate - before would be racy.
1615 */
1616static void drain_mmlist(void)
1617{
1618	struct list_head *p, *next;
1619	unsigned int type;
1620
1621	for (type = 0; type < nr_swapfiles; type++)
1622		if (swap_info[type]->inuse_pages)
1623			return;
1624	spin_lock(&mmlist_lock);
1625	list_for_each_safe(p, next, &init_mm.mmlist)
1626		list_del_init(p);
1627	spin_unlock(&mmlist_lock);
1628}
1629
1630/*
1631 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1632 * corresponds to page offset for the specified swap entry.
1633 * Note that the type of this function is sector_t, but it returns page offset
1634 * into the bdev, not sector offset.
1635 */
1636static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1637{
1638	struct swap_info_struct *sis;
1639	struct swap_extent *start_se;
1640	struct swap_extent *se;
1641	pgoff_t offset;
1642
1643	sis = swap_info[swp_type(entry)];
1644	*bdev = sis->bdev;
1645
1646	offset = swp_offset(entry);
1647	start_se = sis->curr_swap_extent;
1648	se = start_se;
1649
1650	for ( ; ; ) {
1651		if (se->start_page <= offset &&
1652				offset < (se->start_page + se->nr_pages)) {
1653			return se->start_block + (offset - se->start_page);
1654		}
1655		se = list_next_entry(se, list);
1656		sis->curr_swap_extent = se;
1657		BUG_ON(se == start_se);		/* It *must* be present */
1658	}
1659}
1660
1661/*
1662 * Returns the page offset into bdev for the specified page's swap entry.
1663 */
1664sector_t map_swap_page(struct page *page, struct block_device **bdev)
1665{
1666	swp_entry_t entry;
1667	entry.val = page_private(page);
1668	return map_swap_entry(entry, bdev);
1669}
1670
1671/*
1672 * Free all of a swapdev's extent information
1673 */
1674static void destroy_swap_extents(struct swap_info_struct *sis)
1675{
1676	while (!list_empty(&sis->first_swap_extent.list)) {
1677		struct swap_extent *se;
 
1678
1679		se = list_first_entry(&sis->first_swap_extent.list,
1680				struct swap_extent, list);
1681		list_del(&se->list);
1682		kfree(se);
1683	}
1684
1685	if (sis->flags & SWP_FILE) {
1686		struct file *swap_file = sis->swap_file;
1687		struct address_space *mapping = swap_file->f_mapping;
1688
1689		sis->flags &= ~SWP_FILE;
1690		mapping->a_ops->swap_deactivate(swap_file);
 
1691	}
1692}
1693
1694/*
1695 * Add a block range (and the corresponding page range) into this swapdev's
1696 * extent list.  The extent list is kept sorted in page order.
1697 *
1698 * This function rather assumes that it is called in ascending page order.
1699 */
1700int
1701add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1702		unsigned long nr_pages, sector_t start_block)
1703{
 
1704	struct swap_extent *se;
1705	struct swap_extent *new_se;
1706	struct list_head *lh;
1707
1708	if (start_page == 0) {
1709		se = &sis->first_swap_extent;
1710		sis->curr_swap_extent = se;
1711		se->start_page = 0;
1712		se->nr_pages = nr_pages;
1713		se->start_block = start_block;
1714		return 1;
1715	} else {
1716		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
1717		se = list_entry(lh, struct swap_extent, list);
 
1718		BUG_ON(se->start_page + se->nr_pages != start_page);
1719		if (se->start_block + se->nr_pages == start_block) {
1720			/* Merge it */
1721			se->nr_pages += nr_pages;
1722			return 0;
1723		}
1724	}
1725
1726	/*
1727	 * No merge.  Insert a new extent, preserving ordering.
1728	 */
1729	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1730	if (new_se == NULL)
1731		return -ENOMEM;
1732	new_se->start_page = start_page;
1733	new_se->nr_pages = nr_pages;
1734	new_se->start_block = start_block;
1735
1736	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
 
1737	return 1;
1738}
 
1739
1740/*
1741 * A `swap extent' is a simple thing which maps a contiguous range of pages
1742 * onto a contiguous range of disk blocks.  An ordered list of swap extents
1743 * is built at swapon time and is then used at swap_writepage/swap_readpage
1744 * time for locating where on disk a page belongs.
1745 *
1746 * If the swapfile is an S_ISBLK block device, a single extent is installed.
1747 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1748 * swap files identically.
1749 *
1750 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1751 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1752 * swapfiles are handled *identically* after swapon time.
1753 *
1754 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1755 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1756 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1757 * requirements, they are simply tossed out - we will never use those blocks
1758 * for swapping.
1759 *
1760 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1761 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1762 * which will scribble on the fs.
1763 *
1764 * The amount of disk space which a single swap extent represents varies.
1765 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1766 * extents in the list.  To avoid much list walking, we cache the previous
1767 * search location in `curr_swap_extent', and start new searches from there.
1768 * This is extremely effective.  The average number of iterations in
1769 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1770 */
1771static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1772{
1773	struct file *swap_file = sis->swap_file;
1774	struct address_space *mapping = swap_file->f_mapping;
1775	struct inode *inode = mapping->host;
1776	int ret;
1777
1778	if (S_ISBLK(inode->i_mode)) {
1779		ret = add_swap_extent(sis, 0, sis->max, 0);
1780		*span = sis->pages;
1781		return ret;
1782	}
1783
1784	if (mapping->a_ops->swap_activate) {
1785		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
 
 
1786		if (!ret) {
1787			sis->flags |= SWP_FILE;
1788			ret = add_swap_extent(sis, 0, sis->max, 0);
1789			*span = sis->pages;
1790		}
1791		return ret;
1792	}
1793
1794	return generic_swapfile_activate(sis, swap_file, span);
1795}
1796
1797static void _enable_swap_info(struct swap_info_struct *p, int prio,
1798				unsigned char *swap_map,
1799				struct swap_cluster_info *cluster_info)
1800{
1801	if (prio >= 0)
1802		p->prio = prio;
1803	else
1804		p->prio = --least_priority;
1805	/*
1806	 * the plist prio is negated because plist ordering is
1807	 * low-to-high, while swap ordering is high-to-low
1808	 */
1809	p->list.prio = -p->prio;
1810	p->avail_list.prio = -p->prio;
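	/*
	 * e.g. swap priorities 10 > 5 > -1 become plist priorities
	 * -10 < -5 < 1, so the highest-priority swap device still
	 * sorts to the head of both plists.
	 */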
1811	p->swap_map = swap_map;
1812	p->cluster_info = cluster_info;
1813	p->flags |= SWP_WRITEOK;
1814	atomic_long_add(p->pages, &nr_swap_pages);
1815	total_swap_pages += p->pages;
1816
1817	assert_spin_locked(&swap_lock);
1818	/*
1819	 * both lists are plists, and thus priority ordered.
1820	 * swap_active_head needs to be priority ordered for swapoff(),
1821	 * which on removal of any swap_info_struct with an auto-assigned
1822	 * (i.e. negative) priority increments the auto-assigned priority
1823	 * of any lower-priority swap_info_structs.
1824	 * swap_avail_head needs to be priority ordered for get_swap_page(),
1825	 * which allocates swap pages from the highest available priority
1826	 * swap_info_struct.
1827	 */
1828	plist_add(&p->list, &swap_active_head);
1829	spin_lock(&swap_avail_lock);
1830	plist_add(&p->avail_list, &swap_avail_head);
1831	spin_unlock(&swap_avail_lock);
1832}
1833
1834static void enable_swap_info(struct swap_info_struct *p, int prio,
1835				unsigned char *swap_map,
1836				struct swap_cluster_info *cluster_info,
1837				unsigned long *frontswap_map)
1838{
1839	frontswap_init(p->type, frontswap_map);
1840	spin_lock(&swap_lock);
1841	spin_lock(&p->lock);
1842	_enable_swap_info(p, prio, swap_map, cluster_info);
1843	spin_unlock(&p->lock);
1844	spin_unlock(&swap_lock);
1845}
1846
1847static void reinsert_swap_info(struct swap_info_struct *p)
1848{
1849	spin_lock(&swap_lock);
1850	spin_lock(&p->lock);
1851	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
1852	spin_unlock(&p->lock);
1853	spin_unlock(&swap_lock);
1854}
1855
1856SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1857{
1858	struct swap_info_struct *p = NULL;
1859	unsigned char *swap_map;
1860	struct swap_cluster_info *cluster_info;
1861	unsigned long *frontswap_map;
1862	struct file *swap_file, *victim;
1863	struct address_space *mapping;
1864	struct inode *inode;
1865	struct filename *pathname;
1866	int err, found = 0;
1867	unsigned int old_block_size;
1868
1869	if (!capable(CAP_SYS_ADMIN))
1870		return -EPERM;
1871
1872	BUG_ON(!current->mm);
1873
1874	pathname = getname(specialfile);
1875	if (IS_ERR(pathname))
1876		return PTR_ERR(pathname);
1877
1878	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
1879	err = PTR_ERR(victim);
1880	if (IS_ERR(victim))
1881		goto out;
1882
1883	mapping = victim->f_mapping;
1884	spin_lock(&swap_lock);
1885	plist_for_each_entry(p, &swap_active_head, list) {
1886		if (p->flags & SWP_WRITEOK) {
1887			if (p->swap_file->f_mapping == mapping) {
1888				found = 1;
1889				break;
1890			}
1891		}
1892	}
1893	if (!found) {
1894		err = -EINVAL;
1895		spin_unlock(&swap_lock);
1896		goto out_dput;
1897	}
1898	if (!security_vm_enough_memory_mm(current->mm, p->pages))
1899		vm_unacct_memory(p->pages);
1900	else {
1901		err = -ENOMEM;
1902		spin_unlock(&swap_lock);
1903		goto out_dput;
1904	}
1905	spin_lock(&swap_avail_lock);
1906	plist_del(&p->avail_list, &swap_avail_head);
1907	spin_unlock(&swap_avail_lock);
1908	spin_lock(&p->lock);
1909	if (p->prio < 0) {
1910		struct swap_info_struct *si = p;
1911
1912		plist_for_each_entry_continue(si, &swap_active_head, list) {
1913			si->prio++;
1914			si->list.prio--;
1915			si->avail_list.prio--;
1916		}
1917		least_priority++;
1918	}
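	/*
	 * e.g. swapping off the auto-assigned -2 device among devices
	 * at -1, -2, -3 bumps the -3 device to -2, keeping the
	 * auto-assigned priorities dense.
	 */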
1919	plist_del(&p->list, &swap_active_head);
1920	atomic_long_sub(p->pages, &nr_swap_pages);
1921	total_swap_pages -= p->pages;
1922	p->flags &= ~SWP_WRITEOK;
1923	spin_unlock(&p->lock);
1924	spin_unlock(&swap_lock);
1925
1926	set_current_oom_origin();
1927	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
1928	clear_current_oom_origin();
1929
1930	if (err) {
1931		/* re-insert swap space into swap_active_head */
1932		reinsert_swap_info(p);
1933		goto out_dput;
1934	}
1935
1936	flush_work(&p->discard_work);
1937
1938	destroy_swap_extents(p);
1939	if (p->flags & SWP_CONTINUED)
1940		free_swap_count_continuations(p);
1941
1942	mutex_lock(&swapon_mutex);
1943	spin_lock(&swap_lock);
1944	spin_lock(&p->lock);
1945	drain_mmlist();
1946
1947	/* wait for anyone still in scan_swap_map */
1948	p->highest_bit = 0;		/* cuts scans short */
1949	while (p->flags >= SWP_SCANNING) {
1950		spin_unlock(&p->lock);
1951		spin_unlock(&swap_lock);
1952		schedule_timeout_uninterruptible(1);
1953		spin_lock(&swap_lock);
1954		spin_lock(&p->lock);
1955	}
1956
1957	swap_file = p->swap_file;
1958	old_block_size = p->old_block_size;
1959	p->swap_file = NULL;
1960	p->max = 0;
1961	swap_map = p->swap_map;
1962	p->swap_map = NULL;
1963	cluster_info = p->cluster_info;
1964	p->cluster_info = NULL;
1965	frontswap_map = frontswap_map_get(p);
1966	spin_unlock(&p->lock);
1967	spin_unlock(&swap_lock);
1968	frontswap_invalidate_area(p->type);
1969	frontswap_map_set(p, NULL);
1970	mutex_unlock(&swapon_mutex);
1971	free_percpu(p->percpu_cluster);
1972	p->percpu_cluster = NULL;
1973	vfree(swap_map);
1974	vfree(cluster_info);
1975	vfree(frontswap_map);
1976	/* Destroy swap account information */
1977	swap_cgroup_swapoff(p->type);
1978
1979	inode = mapping->host;
1980	if (S_ISBLK(inode->i_mode)) {
1981		struct block_device *bdev = I_BDEV(inode);
1982		set_blocksize(bdev, old_block_size);
1983		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1984	} else {
1985		inode_lock(inode);
1986		inode->i_flags &= ~S_SWAPFILE;
1987		inode_unlock(inode);
1988	}
1989	filp_close(swap_file, NULL);
1990
1991	/*
1992	 * Clear the SWP_USED flag after all resources are freed so that swapon
1993	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
1994	 * not hold p->lock after we cleared its SWP_WRITEOK.
1995	 */
1996	spin_lock(&swap_lock);
1997	p->flags = 0;
1998	spin_unlock(&swap_lock);
1999
2000	err = 0;
2001	atomic_inc(&proc_poll_event);
2002	wake_up_interruptible(&proc_poll_wait);
2003
2004out_dput:
2005	filp_close(victim, NULL);
2006out:
2007	putname(pathname);
2008	return err;
2009}
2010
2011#ifdef CONFIG_PROC_FS
2012static unsigned swaps_poll(struct file *file, poll_table *wait)
2013{
2014	struct seq_file *seq = file->private_data;
2015
2016	poll_wait(file, &proc_poll_wait, wait);
2017
2018	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2019		seq->poll_event = atomic_read(&proc_poll_event);
2020		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
2021	}
2022
2023	return POLLIN | POLLRDNORM;
2024}
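/*
 * Userspace can thus watch for swapon/swapoff events by polling an
 * open /proc/swaps fd, e.g. (sketch):
 *
 *	int fd = open("/proc/swaps", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLERR | POLLPRI };
 *	poll(&pfd, 1, -1);
 *
 * poll() returns on the next swapon/swapoff: POLLERR|POLLPRI are set
 * only while proc_poll_event is ahead of the count this file last saw.
 */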
2025
2026/* iterator */
2027static void *swap_start(struct seq_file *swap, loff_t *pos)
2028{
2029	struct swap_info_struct *si;
2030	int type;
2031	loff_t l = *pos;
2032
2033	mutex_lock(&swapon_mutex);
2034
2035	if (!l)
2036		return SEQ_START_TOKEN;
2037
2038	for (type = 0; type < nr_swapfiles; type++) {
2039		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2040		si = swap_info[type];
2041		if (!(si->flags & SWP_USED) || !si->swap_map)
2042			continue;
2043		if (!--l)
2044			return si;
2045	}
2046
2047	return NULL;
2048}
2049
2050static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2051{
2052	struct swap_info_struct *si = v;
2053	int type;
2054
2055	if (v == SEQ_START_TOKEN)
2056		type = 0;
2057	else
2058		type = si->type + 1;
2059
2060	for (; type < nr_swapfiles; type++) {
2061		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
2062		si = swap_info[type];
2063		if (!(si->flags & SWP_USED) || !si->swap_map)
2064			continue;
2065		++*pos;
2066		return si;
2067	}
2068
2069	return NULL;
2070}
2071
2072static void swap_stop(struct seq_file *swap, void *v)
2073{
2074	mutex_unlock(&swapon_mutex);
2075}
2076
2077static int swap_show(struct seq_file *swap, void *v)
2078{
2079	struct swap_info_struct *si = v;
2080	struct file *file;
2081	int len;
2082
2083	if (si == SEQ_START_TOKEN) {
2084		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
2085		return 0;
2086	}
2087
2088	file = si->swap_file;
2089	len = seq_file_path(swap, file, " \t\n\\");
2090	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
2091			len < 40 ? 40 - len : 1, " ",
2092			S_ISBLK(file_inode(file)->i_mode) ?
2093				"partition" : "file\t",
2094			si->pages << (PAGE_SHIFT - 10),
2095			si->inuse_pages << (PAGE_SHIFT - 10),
2096			si->prio);
2097	return 0;
2098}
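/*
 * The resulting /proc/swaps looks like (illustrative values):
 *
 * Filename				Type		Size	Used	Priority
 * /dev/sda2                            partition	8388604	2048	-1
 * /var/swapfile                        file		2097148	0	-2
 */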
2099
2100static const struct seq_operations swaps_op = {
2101	.start =	swap_start,
2102	.next =		swap_next,
2103	.stop =		swap_stop,
2104	.show =		swap_show
2105};
2106
2107static int swaps_open(struct inode *inode, struct file *file)
2108{
2109	struct seq_file *seq;
2110	int ret;
2111
2112	ret = seq_open(file, &swaps_op);
2113	if (ret)
2114		return ret;
2115
2116	seq = file->private_data;
2117	seq->poll_event = atomic_read(&proc_poll_event);
2118	return 0;
2119}
2120
2121static const struct file_operations proc_swaps_operations = {
2122	.open		= swaps_open,
2123	.read		= seq_read,
2124	.llseek		= seq_lseek,
2125	.release	= seq_release,
2126	.poll		= swaps_poll,
2127};
2128
2129static int __init procswaps_init(void)
2130{
2131	proc_create("swaps", 0, NULL, &proc_swaps_operations);
2132	return 0;
2133}
2134__initcall(procswaps_init);
2135#endif /* CONFIG_PROC_FS */
2136
2137#ifdef MAX_SWAPFILES_CHECK
2138static int __init max_swapfiles_check(void)
2139{
2140	MAX_SWAPFILES_CHECK();
2141	return 0;
2142}
2143late_initcall(max_swapfiles_check);
2144#endif
2145
2146static struct swap_info_struct *alloc_swap_info(void)
2147{
2148	struct swap_info_struct *p;
2149	unsigned int type;
2150
2151	p = kzalloc(sizeof(*p), GFP_KERNEL);
2152	if (!p)
2153		return ERR_PTR(-ENOMEM);
2154
2155	spin_lock(&swap_lock);
2156	for (type = 0; type < nr_swapfiles; type++) {
2157		if (!(swap_info[type]->flags & SWP_USED))
2158			break;
2159	}
2160	if (type >= MAX_SWAPFILES) {
2161		spin_unlock(&swap_lock);
2162		kfree(p);
2163		return ERR_PTR(-EPERM);
2164	}
2165	if (type >= nr_swapfiles) {
2166		p->type = type;
2167		swap_info[type] = p;
2168		/*
2169		 * Write swap_info[type] before nr_swapfiles, in case a
2170		 * racing procfs swap_start() or swap_next() is reading them.
2171		 * (We never shrink nr_swapfiles, we never free this entry.)
2172		 */
2173		smp_wmb();
2174		nr_swapfiles++;
2175	} else {
2176		kfree(p);
2177		p = swap_info[type];
2178		/*
2179		 * Do not memset this entry: a racing procfs swap_next()
2180		 * would be relying on p->type to remain valid.
2181		 */
2182	}
2183	INIT_LIST_HEAD(&p->first_swap_extent.list);
2184	plist_node_init(&p->list, 0);
2185	plist_node_init(&p->avail_list, 0);
2186	p->flags = SWP_USED;
2187	spin_unlock(&swap_lock);
2188	spin_lock_init(&p->lock);
2189
2190	return p;
2191}
2192
2193static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2194{
2195	int error;
2196
2197	if (S_ISBLK(inode->i_mode)) {
2198		p->bdev = bdgrab(I_BDEV(inode));
2199		error = blkdev_get(p->bdev,
2200				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2201		if (error < 0) {
2202			p->bdev = NULL;
2203			return error;
2204		}
2205		p->old_block_size = block_size(p->bdev);
2206		error = set_blocksize(p->bdev, PAGE_SIZE);
2207		if (error < 0)
2208			return error;
2209		p->flags |= SWP_BLKDEV;
2210	} else if (S_ISREG(inode->i_mode)) {
2211		p->bdev = inode->i_sb->s_bdev;
2212		inode_lock(inode);
2213		if (IS_SWAPFILE(inode))
2214			return -EBUSY;
2215	} else
2216		return -EINVAL;
2217
2218	return 0;
2219}
2220
2221static unsigned long read_swap_header(struct swap_info_struct *p,
2222					union swap_header *swap_header,
2223					struct inode *inode)
2224{
2225	int i;
2226	unsigned long maxpages;
2227	unsigned long swapfilepages;
2228	unsigned long last_page;
2229
2230	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2231		pr_err("Unable to find swap-space signature\n");
2232		return 0;
2233	}
2234
2235	/* swap partition endianness hack... */
2236	if (swab32(swap_header->info.version) == 1) {
2237		swab32s(&swap_header->info.version);
2238		swab32s(&swap_header->info.last_page);
2239		swab32s(&swap_header->info.nr_badpages);
2240		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2241			return 0;
2242		for (i = 0; i < swap_header->info.nr_badpages; i++)
2243			swab32s(&swap_header->info.badpages[i]);
2244	}
2245	/* Check the swap header's sub-version */
2246	if (swap_header->info.version != 1) {
2247		pr_warn("Unable to handle swap header version %d\n",
2248			swap_header->info.version);
2249		return 0;
2250	}
2251
2252	p->lowest_bit  = 1;
2253	p->cluster_next = 1;
2254	p->cluster_nr = 0;
2255
2256	/*
2257	 * Find out how many pages are allowed for a single swap
2258	 * device. There are two limiting factors: 1) the number
2259	 * of bits for the swap offset in the swp_entry_t type, and
2260	 * 2) the number of bits in the swap pte as defined by the
2261	 * different architectures. In order to find the
2262	 * largest possible bit mask, a swap entry with swap type 0
2263	 * and swap offset ~0UL is created, encoded to a swap pte,
2264	 * decoded to a swp_entry_t again, and finally the swap
2265	 * offset is extracted. This will mask all the bits from
2266	 * the initial ~0UL mask that can't be encoded in either
2267	 * the swp_entry_t or the architecture definition of a
2268	 * swap pte.
2269	 */
2270	maxpages = swp_offset(pte_to_swp_entry(
2271			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
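	/*
	 * e.g. on a hypothetical 32-bit layout where the swap pte
	 * loses 5 bits to the swap type and 2 bits to pte flags,
	 * 25 offset bits survive the round trip and maxpages comes
	 * out at 2^25 (128GB of swap with 4kB pages).
	 */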
2272	last_page = swap_header->info.last_page;
2273	if (last_page > maxpages) {
2274		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2275			maxpages << (PAGE_SHIFT - 10),
2276			last_page << (PAGE_SHIFT - 10));
2277	}
2278	if (maxpages > last_page) {
2279		maxpages = last_page + 1;
2280		/* p->max is an unsigned int: don't overflow it */
2281		if ((unsigned int)maxpages == 0)
2282			maxpages = UINT_MAX;
2283	}
2284	p->highest_bit = maxpages - 1;
2285
2286	if (!maxpages)
2287		return 0;
2288	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2289	if (swapfilepages && maxpages > swapfilepages) {
2290		pr_warn("Swap area shorter than signature indicates\n");
2291		return 0;
2292	}
2293	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2294		return 0;
2295	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2296		return 0;
2297
2298	return maxpages;
2299}
2300
2301static int setup_swap_map_and_extents(struct swap_info_struct *p,
2302					union swap_header *swap_header,
2303					unsigned char *swap_map,
2304					struct swap_cluster_info *cluster_info,
2305					unsigned long maxpages,
2306					sector_t *span)
2307{
2308	int i;
2309	unsigned int nr_good_pages;
2310	int nr_extents;
2311	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2312	unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER;
2313
2314	nr_good_pages = maxpages - 1;	/* omit header page */
2315
2316	cluster_list_init(&p->free_clusters);
2317	cluster_list_init(&p->discard_clusters);
2318
2319	for (i = 0; i < swap_header->info.nr_badpages; i++) {
2320		unsigned int page_nr = swap_header->info.badpages[i];
2321		if (page_nr == 0 || page_nr > swap_header->info.last_page)
2322			return -EINVAL;
2323		if (page_nr < maxpages) {
2324			swap_map[page_nr] = SWAP_MAP_BAD;
2325			nr_good_pages--;
2326			/*
2327			 * Haven't marked the cluster free yet, no list
2328			 * operation involved
2329			 */
2330			inc_cluster_info_page(p, cluster_info, page_nr);
2331		}
2332	}
2333
2334	/* Haven't marked the cluster free yet, no list operation involved */
2335	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2336		inc_cluster_info_page(p, cluster_info, i);
2337
2338	if (nr_good_pages) {
2339		swap_map[0] = SWAP_MAP_BAD;
2340		/*
2341		 * Haven't marked the cluster free yet, no list
2342		 * operation involved
2343		 */
2344		inc_cluster_info_page(p, cluster_info, 0);
2345		p->max = maxpages;
2346		p->pages = nr_good_pages;
2347		nr_extents = setup_swap_extents(p, span);
2348		if (nr_extents < 0)
2349			return nr_extents;
2350		nr_good_pages = p->pages;
2351	}
2352	if (!nr_good_pages) {
2353		pr_warn("Empty swap-file\n");
2354		return -EINVAL;
2355	}
2356
2357	if (!cluster_info)
2358		return nr_extents;
2359
2360	for (i = 0; i < nr_clusters; i++) {
2361		if (!cluster_count(&cluster_info[idx])) {
2362			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
2363			cluster_list_add_tail(&p->free_clusters, cluster_info,
2364					      idx);
2365		}
2366		idx++;
2367		if (idx == nr_clusters)
2368			idx = 0;
2369	}
2370	return nr_extents;
2371}
2372
2373/*
2374 * Helper for sys_swapon to determine whether a given swap
2375 * backing device queue supports DISCARD operations.
2376 */
2377static bool swap_discardable(struct swap_info_struct *si)
2378{
2379	struct request_queue *q = bdev_get_queue(si->bdev);
2380
2381	if (!q || !blk_queue_discard(q))
2382		return false;
2383
2384	return true;
2385}
2386
2387SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2388{
2389	struct swap_info_struct *p;
2390	struct filename *name;
2391	struct file *swap_file = NULL;
2392	struct address_space *mapping;
2393	int prio;
2394	int error;
2395	union swap_header *swap_header;
2396	int nr_extents;
2397	sector_t span;
2398	unsigned long maxpages;
2399	unsigned char *swap_map = NULL;
2400	struct swap_cluster_info *cluster_info = NULL;
2401	unsigned long *frontswap_map = NULL;
2402	struct page *page = NULL;
2403	struct inode *inode = NULL;
2404
2405	if (swap_flags & ~SWAP_FLAGS_VALID)
2406		return -EINVAL;
2407
2408	if (!capable(CAP_SYS_ADMIN))
2409		return -EPERM;
2410
2411	p = alloc_swap_info();
2412	if (IS_ERR(p))
2413		return PTR_ERR(p);
2414
2415	INIT_WORK(&p->discard_work, swap_discard_work);
2416
2417	name = getname(specialfile);
2418	if (IS_ERR(name)) {
2419		error = PTR_ERR(name);
2420		name = NULL;
2421		goto bad_swap;
2422	}
2423	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
2424	if (IS_ERR(swap_file)) {
2425		error = PTR_ERR(swap_file);
2426		swap_file = NULL;
2427		goto bad_swap;
2428	}
2429
2430	p->swap_file = swap_file;
2431	mapping = swap_file->f_mapping;
2432	inode = mapping->host;
2433
2434	/* If S_ISREG(inode->i_mode), claim_swapfile() will do inode_lock(inode) */
2435	error = claim_swapfile(p, inode);
2436	if (unlikely(error))
2437		goto bad_swap;
2438
2439	/*
2440	 * Read the swap header.
2441	 */
2442	if (!mapping->a_ops->readpage) {
2443		error = -EINVAL;
2444		goto bad_swap;
2445	}
2446	page = read_mapping_page(mapping, 0, swap_file);
2447	if (IS_ERR(page)) {
2448		error = PTR_ERR(page);
2449		goto bad_swap;
2450	}
2451	swap_header = kmap(page);
2452
2453	maxpages = read_swap_header(p, swap_header, inode);
2454	if (unlikely(!maxpages)) {
2455		error = -EINVAL;
2456		goto bad_swap;
2457	}
2458
2459	/* OK, set up the swap map and apply the bad block list */
2460	swap_map = vzalloc(maxpages);
2461	if (!swap_map) {
2462		error = -ENOMEM;
2463		goto bad_swap;
2464	}
2465
2466	if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
2467		p->flags |= SWP_STABLE_WRITES;
2468
2469	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2470		int cpu;
2471
2472		p->flags |= SWP_SOLIDSTATE;
2473		/*
2474		 * select a random position to start with, to help the SSD's
2475		 * wear leveling
2476		 */
2477		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
2478
2479		cluster_info = vzalloc(DIV_ROUND_UP(maxpages,
2480			SWAPFILE_CLUSTER) * sizeof(*cluster_info));
2481		if (!cluster_info) {
2482			error = -ENOMEM;
2483			goto bad_swap;
2484		}
2485		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
2486		if (!p->percpu_cluster) {
2487			error = -ENOMEM;
2488			goto bad_swap;
2489		}
2490		for_each_possible_cpu(cpu) {
2491			struct percpu_cluster *cluster;
2492			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
2493			cluster_set_null(&cluster->index);
2494		}
2495	}
2496
2497	error = swap_cgroup_swapon(p->type, maxpages);
2498	if (error)
2499		goto bad_swap;
2500
2501	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2502		cluster_info, maxpages, &span);
2503	if (unlikely(nr_extents < 0)) {
2504		error = nr_extents;
2505		goto bad_swap;
2506	}
2507	/* frontswap enabled? set up bit-per-page map for frontswap */
2508	if (IS_ENABLED(CONFIG_FRONTSWAP))
2509		frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
2510
2511	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
2512		/*
2513		 * When discard is enabled for swap with no particular
2514		 * policy flagged, we set all swap discard flags here in
2515		 * order to sustain backward compatibility with older
2516		 * swapon(8) releases.
2517		 */
2518		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
2519			     SWP_PAGE_DISCARD);
2520
2521		/*
2522		 * By flagging sys_swapon, a sysadmin can tell us to
2523		 * either do single-time area discards only, or to just
2524		 * perform discards for released swap page-clusters.
2525		 * Now it's time to adjust the p->flags accordingly.
2526		 */
2527		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
2528			p->flags &= ~SWP_PAGE_DISCARD;
2529		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
2530			p->flags &= ~SWP_AREA_DISCARD;
2531
2532		/* issue a swapon-time discard if it's still required */
2533		if (p->flags & SWP_AREA_DISCARD) {
2534			int err = discard_swap(p);
2535			if (unlikely(err))
2536				pr_err("swapon: discard_swap(%p): %d\n",
2537					p, err);
2538		}
2539	}
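	/*
	 * e.g. swapon(path, SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES)
	 * reaches this point with only SWP_PAGE_DISCARD left set: the
	 * swapon-time area discard above is skipped and only released
	 * page-clusters get discarded later.
	 */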
2540
2541	mutex_lock(&swapon_mutex);
2542	prio = -1;
2543	if (swap_flags & SWAP_FLAG_PREFER)
2544		prio =
2545		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
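	/*
	 * e.g. swapon(path, SWAP_FLAG_PREFER | 5) yields prio 5 here;
	 * without SWAP_FLAG_PREFER, prio stays -1 and _enable_swap_info()
	 * assigns the next auto (negative) priority.
	 */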
2546	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
2547
2548	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
2549		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
2550		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2551		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2552		(p->flags & SWP_DISCARDABLE) ? "D" : "",
2553		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
2554		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
2555		(frontswap_map) ? "FS" : "");
2556
2557	mutex_unlock(&swapon_mutex);
2558	atomic_inc(&proc_poll_event);
2559	wake_up_interruptible(&proc_poll_wait);
2560
2561	if (S_ISREG(inode->i_mode))
2562		inode->i_flags |= S_SWAPFILE;
2563	error = 0;
2564	goto out;
2565bad_swap:
2566	free_percpu(p->percpu_cluster);
2567	p->percpu_cluster = NULL;
2568	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2569		set_blocksize(p->bdev, p->old_block_size);
2570		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2571	}
2572	destroy_swap_extents(p);
2573	swap_cgroup_swapoff(p->type);
2574	spin_lock(&swap_lock);
2575	p->swap_file = NULL;
2576	p->flags = 0;
2577	spin_unlock(&swap_lock);
2578	vfree(swap_map);
2579	vfree(cluster_info);
2580	if (swap_file) {
2581		if (inode && S_ISREG(inode->i_mode)) {
2582			inode_unlock(inode);
2583			inode = NULL;
2584		}
2585		filp_close(swap_file, NULL);
2586	}
2587out:
2588	if (page && !IS_ERR(page)) {
2589		kunmap(page);
2590		put_page(page);
2591	}
2592	if (name)
2593		putname(name);
2594	if (inode && S_ISREG(inode->i_mode))
2595		inode_unlock(inode);
2596	return error;
2597}
2598
2599void si_swapinfo(struct sysinfo *val)
2600{
2601	unsigned int type;
2602	unsigned long nr_to_be_unused = 0;
2603
2604	spin_lock(&swap_lock);
2605	for (type = 0; type < nr_swapfiles; type++) {
2606		struct swap_info_struct *si = swap_info[type];
2607
2608		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2609			nr_to_be_unused += si->inuse_pages;
2610	}
2611	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
2612	val->totalswap = total_swap_pages + nr_to_be_unused;
2613	spin_unlock(&swap_lock);
2614}
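/*
 * This feeds sysinfo(2); e.g. (sketch) userspace doing
 *
 *	struct sysinfo info;
 *	sysinfo(&info);
 *
 * reads the swap in use as info.totalswap - info.freeswap (in units
 * of info.mem_unit).  Pages on a device in mid-swapoff still count
 * towards both totals, so that difference stays stable while swapoff
 * runs.
 */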
2615
2616/*
2617 * Verify that a swap entry is valid and increment its swap map count.
2618 *
2619 * Return values:
2620 * - success -> 0
2621 * - swp_entry is invalid -> EINVAL
2622 * - swp_entry is migration entry -> EINVAL
2623 * - swap-cache reference is requested but there is already one. -> EEXIST
2624 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2625 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2626 */
2627static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2628{
2629	struct swap_info_struct *p;
2630	unsigned long offset, type;
2631	unsigned char count;
2632	unsigned char has_cache;
2633	int err = -EINVAL;
2634
2635	if (non_swap_entry(entry))
2636		goto out;
2637
2638	type = swp_type(entry);
2639	if (type >= nr_swapfiles)
2640		goto bad_file;
2641	p = swap_info[type];
2642	offset = swp_offset(entry);
2643
2644	spin_lock(&p->lock);
2645	if (unlikely(offset >= p->max))
2646		goto unlock_out;
2647
2648	count = p->swap_map[offset];
2649
2650	/*
2651	 * swapin_readahead() doesn't check if a swap entry is valid, so the
2652	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
2653	 */
2654	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
2655		err = -ENOENT;
2656		goto unlock_out;
2657	}
2658
2659	has_cache = count & SWAP_HAS_CACHE;
2660	count &= ~SWAP_HAS_CACHE;
2661	err = 0;
2662
2663	if (usage == SWAP_HAS_CACHE) {
2664
2665		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
2666		if (!has_cache && count)
2667			has_cache = SWAP_HAS_CACHE;
2668		else if (has_cache)		/* someone else added cache */
2669			err = -EEXIST;
2670		else				/* no users remaining */
2671			err = -ENOENT;
2672
2673	} else if (count || has_cache) {
2674
2675		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2676			count += usage;
2677		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2678			err = -EINVAL;
2679		else if (swap_count_continued(p, offset, count))
2680			count = COUNT_CONTINUED;
2681		else
2682			err = -ENOMEM;
2683	} else
2684		err = -ENOENT;			/* unused swap entry */
2685
2686	p->swap_map[offset] = count | has_cache;
2687
2688unlock_out:
2689	spin_unlock(&p->lock);
2690out:
2691	return err;
2692
2693bad_file:
2694	pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
2695	goto out;
2696}
2697
2698/*
2699 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2700 * (in which case its reference count is never incremented).
2701 */
2702void swap_shmem_alloc(swp_entry_t entry)
2703{
2704	__swap_duplicate(entry, SWAP_MAP_SHMEM);
2705}
2706
2707/*
2708 * Increase reference count of swap entry by 1.
2709 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2710 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
2711 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2712 * might occur if a page table entry has become corrupted.
2713 */
2714int swap_duplicate(swp_entry_t entry)
2715{
2716	int err = 0;
2717
2718	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2719		err = add_swap_count_continuation(entry, GFP_ATOMIC);
2720	return err;
2721}
2722
2723/*
2724 * @entry: swap entry for which we allocate swap cache.
2725 *
2726 * Called when allocating swap cache for an existing swap entry.
2727 * This can return error codes; it returns 0 on success.
2728 * -EEXIST means there is already a swap cache.
2729 * Note: return code is different from swap_duplicate().
2730 */
2731int swapcache_prepare(swp_entry_t entry)
2732{
2733	return __swap_duplicate(entry, SWAP_HAS_CACHE);
2734}
2735
2736struct swap_info_struct *page_swap_info(struct page *page)
2737{
2738	swp_entry_t swap = { .val = page_private(page) };
2739	return swap_info[swp_type(swap)];
2740}
2741
2742/*
2743 * out-of-line __page_file_ methods to avoid include hell.
2744 */
2745struct address_space *__page_file_mapping(struct page *page)
2746{
2747	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2748	return page_swap_info(page)->swap_file->f_mapping;
2749}
2750EXPORT_SYMBOL_GPL(__page_file_mapping);
2751
2752pgoff_t __page_file_index(struct page *page)
2753{
2754	swp_entry_t swap = { .val = page_private(page) };
2755	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2756	return swp_offset(swap);
2757}
2758EXPORT_SYMBOL_GPL(__page_file_index);
2759
2760/*
2761 * add_swap_count_continuation - called when a swap count is duplicated
2762 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2763 * page of the original vmalloc'ed swap_map, to hold the continuation count
2764 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2765 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2766 *
2767 * These continuation pages are seldom referenced: the common paths all work
2768 * on the original swap_map, only referring to a continuation page when the
2769 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2770 *
2771 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2772 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2773 * can be called after dropping locks.
2774 */
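/*
 * For example: with SWAP_MAP_MAX 0x3e, the 63rd reference to one
 * entry no longer fits in its swap_map byte, so a continuation page
 * is linked in and carries the next "digit" of the count, up to
 * SWAP_CONT_MAX per page (values per the defines in linux/swap.h).
 */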
2775int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2776{
2777	struct swap_info_struct *si;
2778	struct page *head;
2779	struct page *page;
2780	struct page *list_page;
2781	pgoff_t offset;
2782	unsigned char count;
2783
2784	/*
2785	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2786	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2787	 */
2788	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2789
2790	si = swap_info_get(entry);
2791	if (!si) {
2792		/*
2793		 * An acceptable race has occurred since the failing
2794		 * __swap_duplicate(): the swap entry has been freed,
2795		 * perhaps even the whole swap_map cleared for swapoff.
2796		 */
2797		goto outer;
2798	}
2799
2800	offset = swp_offset(entry);
2801	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2802
2803	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2804		/*
2805		 * The higher the swap count, the more likely it is that tasks
2806		 * will race to add swap count continuation: we need to avoid
2807		 * over-provisioning.
2808		 */
2809		goto out;
2810	}
2811
2812	if (!page) {
2813		spin_unlock(&si->lock);
2814		return -ENOMEM;
2815	}
2816
2817	/*
2818	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2819	 * no architecture is using highmem pages for kernel page tables: so it
2820	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
2821	 */
2822	head = vmalloc_to_page(si->swap_map + offset);
2823	offset &= ~PAGE_MASK;
2824
2825	/*
2826	 * Page allocation does not initialize the page's lru field,
2827	 * but it does always reset its private field.
2828	 */
2829	if (!page_private(head)) {
2830		BUG_ON(count & COUNT_CONTINUED);
2831		INIT_LIST_HEAD(&head->lru);
2832		set_page_private(head, SWP_CONTINUED);
2833		si->flags |= SWP_CONTINUED;
2834	}
2835
2836	list_for_each_entry(list_page, &head->lru, lru) {
2837		unsigned char *map;
2838
2839		/*
2840		 * If the previous map said no continuation, but we've found
2841		 * a continuation page, free our allocation and use this one.
2842		 */
2843		if (!(count & COUNT_CONTINUED))
2844			goto out;
2845
2846		map = kmap_atomic(list_page) + offset;
2847		count = *map;
2848		kunmap_atomic(map);
2849
2850		/*
2851		 * If this continuation count now has some space in it,
2852		 * free our allocation and use this one.
2853		 */
2854		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2855			goto out;
2856	}
2857
2858	list_add_tail(&page->lru, &head->lru);
2859	page = NULL;			/* now it's attached, don't free it */
2860out:
2861	spin_unlock(&si->lock);
2862outer:
2863	if (page)
2864		__free_page(page);
2865	return 0;
2866}
2867
2868/*
2869 * swap_count_continued - when the original swap_map count is incremented
2870 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2871 * into, carry if so, or else fail until a new continuation page is allocated;
2872 * when the original swap_map count is decremented from 0 with continuation,
2873 * borrow from the continuation and report whether it still holds more.
2874 * Called while __swap_duplicate() or swap_entry_free() holds the device's si->lock.
2875 */
2876static bool swap_count_continued(struct swap_info_struct *si,
2877				 pgoff_t offset, unsigned char count)
2878{
2879	struct page *head;
2880	struct page *page;
2881	unsigned char *map;
2882
2883	head = vmalloc_to_page(si->swap_map + offset);
2884	if (page_private(head) != SWP_CONTINUED) {
2885		BUG_ON(count & COUNT_CONTINUED);
2886		return false;		/* need to add count continuation */
2887	}
2888
2889	offset &= ~PAGE_MASK;
2890	page = list_entry(head->lru.next, struct page, lru);
2891	map = kmap_atomic(page) + offset;
2892
2893	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
2894		goto init_map;		/* jump over SWAP_CONT_MAX checks */
2895
2896	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2897		/*
2898		 * Think of how you add 1 to 999
2899		 */
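		/*
		 * i.e. (decimal analogy) 999 + 1 = 1000: every full
		 * digit rolls over and the carry lands in the first
		 * continuation byte with room to spare; if all are
		 * full, the caller must add another page first.
		 */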
2900		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2901			kunmap_atomic(map);
2902			page = list_entry(page->lru.next, struct page, lru);
2903			BUG_ON(page == head);
2904			map = kmap_atomic(page) + offset;
2905		}
2906		if (*map == SWAP_CONT_MAX) {
2907			kunmap_atomic(map);
2908			page = list_entry(page->lru.next, struct page, lru);
2909			if (page == head)
2910				return false;	/* add count continuation */
2911			map = kmap_atomic(page) + offset;
2912init_map:		*map = 0;		/* we didn't zero the page */
2913		}
2914		*map += 1;
2915		kunmap_atomic(map);
2916		page = list_entry(page->lru.prev, struct page, lru);
2917		while (page != head) {
2918			map = kmap_atomic(page) + offset;
2919			*map = COUNT_CONTINUED;
2920			kunmap_atomic(map);
2921			page = list_entry(page->lru.prev, struct page, lru);
2922		}
2923		return true;			/* incremented */
2924
2925	} else {				/* decrementing */
2926		/*
2927		 * Think of how you subtract 1 from 1000
2928		 */
2929		BUG_ON(count != COUNT_CONTINUED);
2930		while (*map == COUNT_CONTINUED) {
2931			kunmap_atomic(map);
2932			page = list_entry(page->lru.next, struct page, lru);
2933			BUG_ON(page == head);
2934			map = kmap_atomic(page) + offset;
2935		}
2936		BUG_ON(*map == 0);
2937		*map -= 1;
2938		if (*map == 0)
2939			count = 0;
2940		kunmap_atomic(map);
2941		page = list_entry(page->lru.prev, struct page, lru);
2942		while (page != head) {
2943			map = kmap_atomic(page) + offset;
2944			*map = SWAP_CONT_MAX | count;
2945			count = COUNT_CONTINUED;
2946			kunmap_atomic(map);
2947			page = list_entry(page->lru.prev, struct page, lru);
2948		}
2949		return count == COUNT_CONTINUED;
2950	}
2951}
2952
2953/*
2954 * free_swap_count_continuations - called by swapoff to free all the continuation pages
2955 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2956 */
2957static void free_swap_count_continuations(struct swap_info_struct *si)
2958{
2959	pgoff_t offset;
2960
2961	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2962		struct page *head;
2963		head = vmalloc_to_page(si->swap_map + offset);
2964		if (page_private(head)) {
2965			struct page *page, *next;
2966
2967			list_for_each_entry_safe(page, next, &head->lru, lru) {
2968				list_del(&page->lru);
2969				__free_page(page);
2970			}
2971		}
2972	}
2973}