v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/mm.h>
  10#include <linux/sched/mm.h>
  11#include <linux/sched/task.h>
  12#include <linux/hugetlb.h>
  13#include <linux/mman.h>
  14#include <linux/slab.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/swap.h>
  17#include <linux/vmalloc.h>
  18#include <linux/pagemap.h>
  19#include <linux/namei.h>
  20#include <linux/shmem_fs.h>
  21#include <linux/blkdev.h>
  22#include <linux/random.h>
  23#include <linux/writeback.h>
  24#include <linux/proc_fs.h>
  25#include <linux/seq_file.h>
  26#include <linux/init.h>
  27#include <linux/ksm.h>
  28#include <linux/rmap.h>
  29#include <linux/security.h>
  30#include <linux/backing-dev.h>
  31#include <linux/mutex.h>
  32#include <linux/capability.h>
  33#include <linux/syscalls.h>
  34#include <linux/memcontrol.h>
  35#include <linux/poll.h>
  36#include <linux/oom.h>
  37#include <linux/frontswap.h>
  38#include <linux/swapfile.h>
  39#include <linux/export.h>
  40#include <linux/swap_slots.h>
  41#include <linux/sort.h>
  42#include <linux/completion.h>
  43
  44#include <asm/tlbflush.h>
  45#include <linux/swapops.h>
   46#include <linux/swap_cgroup.h>
  47
  48static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  49				 unsigned char);
  50static void free_swap_count_continuations(struct swap_info_struct *);
  51
  52DEFINE_SPINLOCK(swap_lock);
  53static unsigned int nr_swapfiles;
  54atomic_long_t nr_swap_pages;
  55/*
  56 * Some modules use swappable objects and may try to swap them out under
  57 * memory pressure (via the shrinker). Before doing so, they may wish to
  58 * check to see if any swap space is available.
  59 */
  60EXPORT_SYMBOL_GPL(nr_swap_pages);
  61/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  62long total_swap_pages;
   63static int least_priority = -1;
  64
  65static const char Bad_file[] = "Bad swap file entry ";
  66static const char Unused_file[] = "Unused swap file entry ";
  67static const char Bad_offset[] = "Bad swap offset entry ";
  68static const char Unused_offset[] = "Unused swap offset entry ";
  69
  70/*
  71 * all active swap_info_structs
  72 * protected with swap_lock, and ordered by priority.
  73 */
  74PLIST_HEAD(swap_active_head);
  75
  76/*
  77 * all available (active, not full) swap_info_structs
  78 * protected with swap_avail_lock, ordered by priority.
  79 * This is used by get_swap_page() instead of swap_active_head
  80 * because swap_active_head includes all swap_info_structs,
  81 * but get_swap_page() doesn't need to look at full ones.
  82 * This uses its own lock instead of swap_lock because when a
  83 * swap_info_struct changes between not-full/full, it needs to
  84 * add/remove itself to/from this list, but the swap_info_struct->lock
  85 * is held and the locking order requires swap_lock to be taken
  86 * before any swap_info_struct->lock.
  87 */
  88static struct plist_head *swap_avail_heads;
  89static DEFINE_SPINLOCK(swap_avail_lock);
  90
  91struct swap_info_struct *swap_info[MAX_SWAPFILES];
  92
  93static DEFINE_MUTEX(swapon_mutex);
  94
  95static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  96/* Activity counter to indicate that a swapon or swapoff has occurred */
  97static atomic_t proc_poll_event = ATOMIC_INIT(0);
  98
  99atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 100
 101static struct swap_info_struct *swap_type_to_swap_info(int type)
 102{
 103	if (type >= MAX_SWAPFILES)
 104		return NULL;
 105
 106	return READ_ONCE(swap_info[type]); /* rcu_dereference() */
 107}
 108
 109static inline unsigned char swap_count(unsigned char ent)
 110{
 111	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
 112}
 113
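/*
 * Minimal standalone sketch of how a swap_map byte decomposes under
 * swap_count() above: the low bits hold the usage count, SWAP_HAS_CACHE
 * marks a swap-cache reference, COUNT_CONTINUED marks an overflowed count.
 * The bit values below are assumptions restated from include/linux/swap.h
 * for this kernel series, not definitions copied from the build; the sketch
 * compiles as an ordinary userspace program.
 */
#include <stdio.h>

#define EX_SWAP_HAS_CACHE	0x40	/* assumed value of SWAP_HAS_CACHE */
#define EX_COUNT_CONTINUED	0x80	/* assumed value of COUNT_CONTINUED */

/* Same operation as swap_count() above: strip the swap-cache bit. */
static unsigned char ex_swap_count(unsigned char ent)
{
	return ent & ~EX_SWAP_HAS_CACHE;
}

int main(void)
{
	unsigned char ent = EX_SWAP_HAS_CACHE | 3;	/* cached + 3 references */

	printf("count=%d cached=%d\n",
	       ex_swap_count(ent) & ~EX_COUNT_CONTINUED,	/* -> 3 */
	       !!(ent & EX_SWAP_HAS_CACHE));			/* -> 1 */
	return 0;
}
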
 114/* Reclaim the swap entry anyway if possible */
 115#define TTRS_ANYWAY		0x1
 116/*
 117 * Reclaim the swap entry if there are no more mappings of the
 118 * corresponding page
 119 */
 120#define TTRS_UNMAPPED		0x2
  121/* Reclaim the swap entry if swap is getting full */
 122#define TTRS_FULL		0x4
 123
 124/* returns 1 if swap entry is freed */
 125static int __try_to_reclaim_swap(struct swap_info_struct *si,
 126				 unsigned long offset, unsigned long flags)
 127{
 128	swp_entry_t entry = swp_entry(si->type, offset);
 129	struct page *page;
 130	int ret = 0;
 131
 132	page = find_get_page(swap_address_space(entry), offset);
 133	if (!page)
 134		return 0;
 135	/*
  136	 * This function may be called from scan_swap_map_slots(), which in turn
  137	 * is reached from vmscan.c while it is reclaiming pages, so a page lock
  138	 * may already be held here.  We have to use trylock to avoid deadlock.
  139	 * This is a special case; in normal operation use try_to_free_swap()
  140	 * with an explicit lock_page().
 141	 */
 142	if (trylock_page(page)) {
 143		if ((flags & TTRS_ANYWAY) ||
 144		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
 145		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
 146			ret = try_to_free_swap(page);
 147		unlock_page(page);
 148	}
 149	put_page(page);
 150	return ret;
 151}
 152
 153static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 154{
 155	struct rb_node *rb = rb_first(&sis->swap_extent_root);
 156	return rb_entry(rb, struct swap_extent, rb_node);
 157}
 158
 159static inline struct swap_extent *next_se(struct swap_extent *se)
 160{
 161	struct rb_node *rb = rb_next(&se->rb_node);
 162	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 163}
 164
 165/*
  166 * swapon tells the device that all the old swap contents can be discarded,
 167 * to allow the swap device to optimize its wear-levelling.
 168 */
 169static int discard_swap(struct swap_info_struct *si)
 170{
 171	struct swap_extent *se;
 172	sector_t start_block;
 173	sector_t nr_blocks;
 174	int err = 0;
 175
 176	/* Do not discard the swap header page! */
 177	se = first_se(si);
 178	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 179	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 180	if (nr_blocks) {
 181		err = blkdev_issue_discard(si->bdev, start_block,
 182				nr_blocks, GFP_KERNEL, 0);
 183		if (err)
 184			return err;
 185		cond_resched();
 186	}
 187
 188	for (se = next_se(se); se; se = next_se(se)) {
 189		start_block = se->start_block << (PAGE_SHIFT - 9);
 190		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 191
 192		err = blkdev_issue_discard(si->bdev, start_block,
 193				nr_blocks, GFP_KERNEL, 0);
 194		if (err)
 195			break;
 196
 197		cond_resched();
 198	}
 199	return err;		/* That will often be -EOPNOTSUPP */
 200}
 201
 202static struct swap_extent *
 203offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 204{
 205	struct swap_extent *se;
 206	struct rb_node *rb;
 207
 208	rb = sis->swap_extent_root.rb_node;
 209	while (rb) {
 210		se = rb_entry(rb, struct swap_extent, rb_node);
 211		if (offset < se->start_page)
 212			rb = rb->rb_left;
 213		else if (offset >= se->start_page + se->nr_pages)
 214			rb = rb->rb_right;
 215		else
 216			return se;
 217	}
 218	/* It *must* be present */
 219	BUG();
 220}
 221
 222sector_t swap_page_sector(struct page *page)
 223{
 224	struct swap_info_struct *sis = page_swap_info(page);
 225	struct swap_extent *se;
 226	sector_t sector;
 227	pgoff_t offset;
 228
 229	offset = __page_file_index(page);
 230	se = offset_to_swap_extent(sis, offset);
 231	sector = se->start_block + (offset - se->start_page);
 232	return sector << (PAGE_SHIFT - 9);
 233}
 234
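/*
 * Minimal standalone sketch of the extent arithmetic used by
 * offset_to_swap_extent() and swap_page_sector() above: an extent maps the
 * swap page offsets [start_page, start_page + nr_pages) onto contiguous
 * device blocks starting at start_block, and a block number becomes a
 * 512-byte sector via << (PAGE_SHIFT - 9).  PAGE_SHIFT is assumed to be 12
 * (4 KiB pages) purely so the sketch builds standalone.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	12		/* assumption: 4 KiB pages */

struct ex_swap_extent {
	unsigned long start_page;	/* first swap offset covered */
	unsigned long nr_pages;		/* number of pages covered */
	unsigned long long start_block;	/* first device block (page-sized) */
};

/* Same mapping as swap_page_sector(): swap offset -> 512-byte sector. */
static unsigned long long ex_swap_offset_to_sector(const struct ex_swap_extent *se,
						   unsigned long offset)
{
	unsigned long long block = se->start_block + (offset - se->start_page);

	return block << (EX_PAGE_SHIFT - 9);
}

int main(void)
{
	struct ex_swap_extent se = { .start_page = 1, .nr_pages = 1024,
				     .start_block = 8 };

	/* offset 5 lies 4 pages into the extent -> block 12 -> sector 96 */
	printf("%llu\n", ex_swap_offset_to_sector(&se, 5));
	return 0;
}
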
 235/*
  236 * swap allocation tells the device that a cluster of swap can now be discarded,
 237 * to allow the swap device to optimize its wear-levelling.
 238 */
 239static void discard_swap_cluster(struct swap_info_struct *si,
 240				 pgoff_t start_page, pgoff_t nr_pages)
 241{
 242	struct swap_extent *se = offset_to_swap_extent(si, start_page);
 243
 244	while (nr_pages) {
 245		pgoff_t offset = start_page - se->start_page;
 246		sector_t start_block = se->start_block + offset;
 247		sector_t nr_blocks = se->nr_pages - offset;
 248
 249		if (nr_blocks > nr_pages)
 250			nr_blocks = nr_pages;
 251		start_page += nr_blocks;
 252		nr_pages -= nr_blocks;
 253
 254		start_block <<= PAGE_SHIFT - 9;
 255		nr_blocks <<= PAGE_SHIFT - 9;
 256		if (blkdev_issue_discard(si->bdev, start_block,
 257					nr_blocks, GFP_NOIO, 0))
 258			break;
 259
 260		se = next_se(se);
 261	}
 262}
 263
 264#ifdef CONFIG_THP_SWAP
 265#define SWAPFILE_CLUSTER	HPAGE_PMD_NR
 266
 267#define swap_entry_size(size)	(size)
 268#else
 269#define SWAPFILE_CLUSTER	256
 270
 271/*
  272 * Define swap_entry_size() as a constant to let the compiler optimize
  273 * out some code if !CONFIG_THP_SWAP
 274 */
 275#define swap_entry_size(size)	1
 276#endif
 277#define LATENCY_LIMIT		256
 278
 279static inline void cluster_set_flag(struct swap_cluster_info *info,
 280	unsigned int flag)
 281{
 282	info->flags = flag;
 283}
 284
 285static inline unsigned int cluster_count(struct swap_cluster_info *info)
 286{
 287	return info->data;
 288}
 289
 290static inline void cluster_set_count(struct swap_cluster_info *info,
 291				     unsigned int c)
 292{
 293	info->data = c;
 294}
 295
 296static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 297					 unsigned int c, unsigned int f)
 298{
 299	info->flags = f;
 300	info->data = c;
 301}
 302
 303static inline unsigned int cluster_next(struct swap_cluster_info *info)
 304{
 305	return info->data;
 306}
 307
 308static inline void cluster_set_next(struct swap_cluster_info *info,
 309				    unsigned int n)
 310{
 311	info->data = n;
 312}
 313
 314static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 315					 unsigned int n, unsigned int f)
 316{
 317	info->flags = f;
 318	info->data = n;
 319}
 320
 321static inline bool cluster_is_free(struct swap_cluster_info *info)
 322{
 323	return info->flags & CLUSTER_FLAG_FREE;
 324}
 325
 326static inline bool cluster_is_null(struct swap_cluster_info *info)
 327{
 328	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 329}
 330
 331static inline void cluster_set_null(struct swap_cluster_info *info)
 332{
 333	info->flags = CLUSTER_FLAG_NEXT_NULL;
 334	info->data = 0;
 335}
 336
 337static inline bool cluster_is_huge(struct swap_cluster_info *info)
 338{
 339	if (IS_ENABLED(CONFIG_THP_SWAP))
 340		return info->flags & CLUSTER_FLAG_HUGE;
 341	return false;
 342}
 343
 344static inline void cluster_clear_huge(struct swap_cluster_info *info)
 345{
 346	info->flags &= ~CLUSTER_FLAG_HUGE;
 347}
 348
 349static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 350						     unsigned long offset)
 351{
 352	struct swap_cluster_info *ci;
 353
 354	ci = si->cluster_info;
 355	if (ci) {
 356		ci += offset / SWAPFILE_CLUSTER;
 357		spin_lock(&ci->lock);
 358	}
 359	return ci;
 360}
 361
 362static inline void unlock_cluster(struct swap_cluster_info *ci)
 363{
 364	if (ci)
 365		spin_unlock(&ci->lock);
 366}
 367
 368/*
 369 * Determine the locking method in use for this device.  Return
 370 * swap_cluster_info if SSD-style cluster-based locking is in place.
 371 */
 372static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 373		struct swap_info_struct *si, unsigned long offset)
 374{
 375	struct swap_cluster_info *ci;
 376
 377	/* Try to use fine-grained SSD-style locking if available: */
 378	ci = lock_cluster(si, offset);
 379	/* Otherwise, fall back to traditional, coarse locking: */
 380	if (!ci)
 381		spin_lock(&si->lock);
 382
 383	return ci;
 384}
 385
 386static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 387					       struct swap_cluster_info *ci)
 388{
 389	if (ci)
 390		unlock_cluster(ci);
 391	else
 392		spin_unlock(&si->lock);
 393}
 394
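/*
 * Minimal standalone sketch of how lock_cluster() above locates the
 * swap_cluster_info for a swap offset: each cluster covers SWAPFILE_CLUSTER
 * consecutive offsets, so the cluster index is offset / SWAPFILE_CLUSTER and
 * the slot within it is the remainder.  The value 256 is the
 * !CONFIG_THP_SWAP setting defined earlier in this file (with CONFIG_THP_SWAP
 * it is HPAGE_PMD_NR).  Note the pairing rule: whatever
 * lock_cluster_or_swap_info() took (a per-cluster lock on SSD, or si->lock
 * otherwise) must be released with unlock_cluster_or_swap_info().
 */
#include <stdio.h>

#define EX_SWAPFILE_CLUSTER	256	/* assumption: !CONFIG_THP_SWAP value */

int main(void)
{
	unsigned long offset = 1000;

	printf("cluster=%lu slot-in-cluster=%lu\n",
	       offset / EX_SWAPFILE_CLUSTER,	/* -> 3 */
	       offset % EX_SWAPFILE_CLUSTER);	/* -> 232 */
	return 0;
}
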
 395static inline bool cluster_list_empty(struct swap_cluster_list *list)
 396{
 397	return cluster_is_null(&list->head);
 398}
 399
 400static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 401{
 402	return cluster_next(&list->head);
 403}
 404
 405static void cluster_list_init(struct swap_cluster_list *list)
 406{
 407	cluster_set_null(&list->head);
 408	cluster_set_null(&list->tail);
 409}
 410
 411static void cluster_list_add_tail(struct swap_cluster_list *list,
 412				  struct swap_cluster_info *ci,
 413				  unsigned int idx)
 414{
 415	if (cluster_list_empty(list)) {
 416		cluster_set_next_flag(&list->head, idx, 0);
 417		cluster_set_next_flag(&list->tail, idx, 0);
 418	} else {
 419		struct swap_cluster_info *ci_tail;
 420		unsigned int tail = cluster_next(&list->tail);
 421
 422		/*
 423		 * Nested cluster lock, but both cluster locks are
  424		 * only acquired while we hold swap_info_struct->lock
 425		 */
 426		ci_tail = ci + tail;
 427		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 428		cluster_set_next(ci_tail, idx);
 429		spin_unlock(&ci_tail->lock);
 430		cluster_set_next_flag(&list->tail, idx, 0);
 431	}
 432}
 433
 434static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 435					   struct swap_cluster_info *ci)
 436{
 437	unsigned int idx;
 438
 439	idx = cluster_next(&list->head);
 440	if (cluster_next(&list->tail) == idx) {
 441		cluster_set_null(&list->head);
 442		cluster_set_null(&list->tail);
 443	} else
 444		cluster_set_next_flag(&list->head,
 445				      cluster_next(&ci[idx]), 0);
 446
 447	return idx;
 448}
 449
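/*
 * Minimal standalone model of the swap_cluster_list above: head and tail
 * store cluster *indices*, and each cluster's "next" link is stored in the
 * cluster array itself, so the list is threaded through cluster_info[].
 * Locking and the CLUSTER_FLAG_* bits are omitted; this only models the
 * index threading of cluster_list_add_tail()/cluster_list_del_first().
 */
#include <stdio.h>

#define EX_NIL	(~0u)			/* stand-in for the "null" flag state */

struct ex_cluster {
	unsigned int next;		/* index of the next cluster in a list */
};

struct ex_cluster_list {
	unsigned int head, tail;	/* cluster indices, EX_NIL when empty */
};

static void ex_list_init(struct ex_cluster_list *l)
{
	l->head = l->tail = EX_NIL;
}

static void ex_list_add_tail(struct ex_cluster_list *l,
			     struct ex_cluster *ci, unsigned int idx)
{
	if (l->head == EX_NIL) {
		l->head = l->tail = idx;
	} else {
		ci[l->tail].next = idx;	/* link the old tail to the new one */
		l->tail = idx;
	}
}

static unsigned int ex_list_del_first(struct ex_cluster_list *l,
				      struct ex_cluster *ci)
{
	unsigned int idx = l->head;

	if (l->tail == idx)		/* last element: list becomes empty */
		l->head = l->tail = EX_NIL;
	else
		l->head = ci[idx].next;
	return idx;
}

int main(void)
{
	struct ex_cluster ci[4];
	struct ex_cluster_list free_list;
	unsigned int a, b;

	ex_list_init(&free_list);
	ex_list_add_tail(&free_list, ci, 2);
	ex_list_add_tail(&free_list, ci, 0);
	a = ex_list_del_first(&free_list, ci);
	b = ex_list_del_first(&free_list, ci);
	printf("%u %u\n", a, b);	/* -> 2 0, FIFO order */
	return 0;
}
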
  450/* Add a cluster to the discard list and schedule the discard work */
 451static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 452		unsigned int idx)
 453{
 454	/*
 455	 * If scan_swap_map_slots() can't find a free cluster, it will check
 456	 * si->swap_map directly. To make sure the discarding cluster isn't
 457	 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
 458	 * It will be cleared after discard
 459	 */
 460	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 461			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 462
 463	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 464
 465	schedule_work(&si->discard_work);
 466}
 467
 468static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 469{
 470	struct swap_cluster_info *ci = si->cluster_info;
 471
 472	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 473	cluster_list_add_tail(&si->free_clusters, ci, idx);
 474}
 475
 476/*
  477 * Actually do the discards. After a cluster discard is finished, the cluster
  478 * will be added to the free cluster list. The caller should hold si->lock.
  479 */
 480static void swap_do_scheduled_discard(struct swap_info_struct *si)
 481{
 482	struct swap_cluster_info *info, *ci;
 483	unsigned int idx;
 484
 485	info = si->cluster_info;
 486
 487	while (!cluster_list_empty(&si->discard_clusters)) {
 488		idx = cluster_list_del_first(&si->discard_clusters, info);
 489		spin_unlock(&si->lock);
 490
 491		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 492				SWAPFILE_CLUSTER);
 493
 494		spin_lock(&si->lock);
 495		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 496		__free_cluster(si, idx);
 497		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 498				0, SWAPFILE_CLUSTER);
 499		unlock_cluster(ci);
 500	}
 501}
 502
 503static void swap_discard_work(struct work_struct *work)
 504{
 505	struct swap_info_struct *si;
 506
 507	si = container_of(work, struct swap_info_struct, discard_work);
 508
 509	spin_lock(&si->lock);
 510	swap_do_scheduled_discard(si);
 511	spin_unlock(&si->lock);
 512}
 513
 514static void swap_users_ref_free(struct percpu_ref *ref)
 515{
 516	struct swap_info_struct *si;
 517
 518	si = container_of(ref, struct swap_info_struct, users);
 519	complete(&si->comp);
 520}
 521
 522static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 523{
 524	struct swap_cluster_info *ci = si->cluster_info;
 525
 526	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 527	cluster_list_del_first(&si->free_clusters, ci);
 528	cluster_set_count_flag(ci + idx, 0, 0);
 529}
 530
 531static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 532{
 533	struct swap_cluster_info *ci = si->cluster_info + idx;
 534
 535	VM_BUG_ON(cluster_count(ci) != 0);
 536	/*
  537	 * If the swap is discardable, prepare to discard the cluster
  538	 * instead of freeing it immediately. The cluster will be freed
  539	 * after the discard.
 540	 */
 541	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 542	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 543		swap_cluster_schedule_discard(si, idx);
 544		return;
 545	}
 546
 547	__free_cluster(si, idx);
 548}
 549
 550/*
 551 * The cluster corresponding to page_nr will be used. The cluster will be
 552 * removed from free cluster list and its usage counter will be increased.
 553 */
 554static void inc_cluster_info_page(struct swap_info_struct *p,
 555	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 556{
 557	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 558
 559	if (!cluster_info)
 560		return;
 561	if (cluster_is_free(&cluster_info[idx]))
 562		alloc_cluster(p, idx);
 563
 564	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 565	cluster_set_count(&cluster_info[idx],
 566		cluster_count(&cluster_info[idx]) + 1);
 567}
 568
 569/*
  570 * The cluster corresponding to page_nr has its usage counter decreased by one.
  571 * If the counter becomes 0, which means no page in the cluster is in use, we can
  572 * optionally discard the cluster and add it to the free cluster list.
 573 */
 574static void dec_cluster_info_page(struct swap_info_struct *p,
 575	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 576{
 577	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 578
 579	if (!cluster_info)
 580		return;
 581
 582	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 583	cluster_set_count(&cluster_info[idx],
 584		cluster_count(&cluster_info[idx]) - 1);
 585
 586	if (cluster_count(&cluster_info[idx]) == 0)
 587		free_cluster(p, idx);
 588}
 589
 590/*
  591 * It's possible for scan_swap_map_slots() to use a free cluster from the middle
  592 * of the free cluster list. Avoid such abuse to prevent list corruption.
 593 */
 594static bool
 595scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 596	unsigned long offset)
 597{
 598	struct percpu_cluster *percpu_cluster;
 599	bool conflict;
 600
 601	offset /= SWAPFILE_CLUSTER;
 602	conflict = !cluster_list_empty(&si->free_clusters) &&
 603		offset != cluster_list_first(&si->free_clusters) &&
 604		cluster_is_free(&si->cluster_info[offset]);
 605
 606	if (!conflict)
 607		return false;
 608
 609	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 610	cluster_set_null(&percpu_cluster->index);
 611	return true;
 612}
 613
 614/*
 615 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 616 * might involve allocating a new cluster for current CPU too.
 617 */
 618static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 619	unsigned long *offset, unsigned long *scan_base)
 620{
 621	struct percpu_cluster *cluster;
 622	struct swap_cluster_info *ci;
 623	unsigned long tmp, max;
 624
 625new_cluster:
 626	cluster = this_cpu_ptr(si->percpu_cluster);
 627	if (cluster_is_null(&cluster->index)) {
 628		if (!cluster_list_empty(&si->free_clusters)) {
 629			cluster->index = si->free_clusters.head;
 630			cluster->next = cluster_next(&cluster->index) *
 631					SWAPFILE_CLUSTER;
 632		} else if (!cluster_list_empty(&si->discard_clusters)) {
 633			/*
  634			 * we don't have a free cluster, but some clusters are being
  635			 * discarded; do the discard now and reclaim them, then
  636			 * reread cluster_next_cpu since we dropped si->lock
 637			 */
 638			swap_do_scheduled_discard(si);
 639			*scan_base = this_cpu_read(*si->cluster_next_cpu);
 640			*offset = *scan_base;
 641			goto new_cluster;
 642		} else
 643			return false;
 644	}
 645
 646	/*
  647	 * Other CPUs can use our cluster if they can't find a free cluster;
  648	 * check whether there are still free entries in the cluster
 649	 */
 650	tmp = cluster->next;
 651	max = min_t(unsigned long, si->max,
 652		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 653	if (tmp < max) {
 654		ci = lock_cluster(si, tmp);
 655		while (tmp < max) {
 656			if (!si->swap_map[tmp])
 657				break;
 658			tmp++;
 659		}
 660		unlock_cluster(ci);
 661	}
 662	if (tmp >= max) {
 663		cluster_set_null(&cluster->index);
 664		goto new_cluster;
 665	}
 666	cluster->next = tmp + 1;
 667	*offset = tmp;
 668	*scan_base = tmp;
 669	return true;
 670}
 671
 672static void __del_from_avail_list(struct swap_info_struct *p)
 673{
 674	int nid;
 675
 676	for_each_node(nid)
 677		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 678}
 679
 680static void del_from_avail_list(struct swap_info_struct *p)
 681{
 682	spin_lock(&swap_avail_lock);
 683	__del_from_avail_list(p);
 684	spin_unlock(&swap_avail_lock);
 685}
 686
 687static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 688			     unsigned int nr_entries)
 689{
 690	unsigned int end = offset + nr_entries - 1;
 691
 692	if (offset == si->lowest_bit)
 693		si->lowest_bit += nr_entries;
 694	if (end == si->highest_bit)
 695		WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
 696	si->inuse_pages += nr_entries;
 697	if (si->inuse_pages == si->pages) {
 698		si->lowest_bit = si->max;
 699		si->highest_bit = 0;
 700		del_from_avail_list(si);
 701	}
 702}
 703
 704static void add_to_avail_list(struct swap_info_struct *p)
 705{
 706	int nid;
 707
 708	spin_lock(&swap_avail_lock);
 709	for_each_node(nid) {
 710		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
 711		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 712	}
 713	spin_unlock(&swap_avail_lock);
 714}
 715
 716static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 717			    unsigned int nr_entries)
 718{
 719	unsigned long begin = offset;
 720	unsigned long end = offset + nr_entries - 1;
 721	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 722
 723	if (offset < si->lowest_bit)
 724		si->lowest_bit = offset;
 725	if (end > si->highest_bit) {
 726		bool was_full = !si->highest_bit;
 727
 728		WRITE_ONCE(si->highest_bit, end);
 729		if (was_full && (si->flags & SWP_WRITEOK))
 730			add_to_avail_list(si);
 731	}
 732	atomic_long_add(nr_entries, &nr_swap_pages);
 733	si->inuse_pages -= nr_entries;
 734	if (si->flags & SWP_BLKDEV)
 735		swap_slot_free_notify =
 736			si->bdev->bd_disk->fops->swap_slot_free_notify;
 737	else
 738		swap_slot_free_notify = NULL;
 739	while (offset <= end) {
 740		arch_swap_invalidate_page(si->type, offset);
 741		frontswap_invalidate_page(si->type, offset);
 742		if (swap_slot_free_notify)
 743			swap_slot_free_notify(si->bdev, offset);
 744		offset++;
 745	}
 746	clear_shadow_from_swap_cache(si->type, begin, end);
 747}
 748
 749static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
 750{
 751	unsigned long prev;
 752
 753	if (!(si->flags & SWP_SOLIDSTATE)) {
 754		si->cluster_next = next;
 755		return;
 756	}
 757
 758	prev = this_cpu_read(*si->cluster_next_cpu);
 759	/*
  760	 * When crossing into a different swap-address-space-sized aligned trunk,
  761	 * choose another trunk at random to avoid lock contention on the swap
  762	 * address space, if possible.
 763	 */
 764	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
 765	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
 766		/* No free swap slots available */
 767		if (si->highest_bit <= si->lowest_bit)
 768			return;
 769		next = si->lowest_bit +
 770			prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
 771		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
 772		next = max_t(unsigned int, next, si->lowest_bit);
 773	}
 774	this_cpu_write(*si->cluster_next_cpu, next);
 775}
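
/*
 * Minimal standalone sketch of the trunk-hopping arithmetic in
 * set_cluster_next() above: when the per-CPU cursor crosses into a different
 * SWAP_ADDRESS_SPACE-sized trunk, a new trunk is chosen at random inside
 * [lowest_bit, highest_bit], aligned down to a trunk boundary and clamped.
 * The shift value of 14 is only an assumed stand-in for
 * SWAP_ADDRESS_SPACE_SHIFT, and rand() is only a stand-in for
 * prandom_u32_max(); both are assumptions for the standalone build.
 */
#include <stdio.h>
#include <stdlib.h>

#define EX_SWAP_ADDRESS_SPACE_SHIFT	14	/* assumed value */
#define EX_SWAP_ADDRESS_SPACE_PAGES	(1UL << EX_SWAP_ADDRESS_SPACE_SHIFT)

static unsigned long ex_pick_trunk(unsigned long lowest_bit,
				   unsigned long highest_bit)
{
	unsigned long next;

	next = lowest_bit +
		(unsigned long)rand() % (highest_bit - lowest_bit + 1);
	next &= ~(EX_SWAP_ADDRESS_SPACE_PAGES - 1);	/* ALIGN_DOWN */
	if (next < lowest_bit)				/* clamp like max_t() */
		next = lowest_bit;
	return next;
}

int main(void)
{
	printf("%lu\n", ex_pick_trunk(1, 1UL << 20));
	return 0;
}
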
  776
 777static int scan_swap_map_slots(struct swap_info_struct *si,
 778			       unsigned char usage, int nr,
 779			       swp_entry_t slots[])
 780{
 781	struct swap_cluster_info *ci;
 782	unsigned long offset;
 783	unsigned long scan_base;
 784	unsigned long last_in_cluster = 0;
 785	int latency_ration = LATENCY_LIMIT;
 786	int n_ret = 0;
 787	bool scanned_many = false;
 788
 789	/*
 790	 * We try to cluster swap pages by allocating them sequentially
 791	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 792	 * way, however, we resort to first-free allocation, starting
 793	 * a new cluster.  This prevents us from scattering swap pages
 794	 * all over the entire swap partition, so that we reduce
 795	 * overall disk seek times between swap pages.  -- sct
 796	 * But we do now try to find an empty cluster.  -Andrea
 797	 * And we let swap pages go all over an SSD partition.  Hugh
 798	 */
 799
 800	si->flags += SWP_SCANNING;
 801	/*
 802	 * Use percpu scan base for SSD to reduce lock contention on
 803	 * cluster and swap cache.  For HDD, sequential access is more
 804	 * important.
 805	 */
 806	if (si->flags & SWP_SOLIDSTATE)
 807		scan_base = this_cpu_read(*si->cluster_next_cpu);
 808	else
 809		scan_base = si->cluster_next;
 810	offset = scan_base;
 811
 812	/* SSD algorithm */
 813	if (si->cluster_info) {
 814		if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 815			goto scan;
 816	} else if (unlikely(!si->cluster_nr--)) {
 817		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 818			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 819			goto checks;
 820		}
 821
 822		spin_unlock(&si->lock);
 823
 824		/*
 825		 * If seek is expensive, start searching for new cluster from
 826		 * start of partition, to minimize the span of allocated swap.
 827		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 828		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 829		 */
 830		scan_base = offset = si->lowest_bit;
 831		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 832
 833		/* Locate the first empty (unaligned) cluster */
 834		for (; last_in_cluster <= si->highest_bit; offset++) {
 835			if (si->swap_map[offset])
 836				last_in_cluster = offset + SWAPFILE_CLUSTER;
 837			else if (offset == last_in_cluster) {
 838				spin_lock(&si->lock);
 839				offset -= SWAPFILE_CLUSTER - 1;
 840				si->cluster_next = offset;
 841				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 842				goto checks;
 843			}
 844			if (unlikely(--latency_ration < 0)) {
 845				cond_resched();
 846				latency_ration = LATENCY_LIMIT;
 847			}
 848		}
 849
 850		offset = scan_base;
 851		spin_lock(&si->lock);
 852		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 853	}
 854
 855checks:
 856	if (si->cluster_info) {
 857		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
 858		/* take a break if we already got some slots */
 859			if (n_ret)
 860				goto done;
 861			if (!scan_swap_map_try_ssd_cluster(si, &offset,
 862							&scan_base))
 863				goto scan;
 864		}
 865	}
 866	if (!(si->flags & SWP_WRITEOK))
 867		goto no_page;
 868	if (!si->highest_bit)
 869		goto no_page;
 870	if (offset > si->highest_bit)
 871		scan_base = offset = si->lowest_bit;
 872
 873	ci = lock_cluster(si, offset);
 874	/* reuse swap entry of cache-only swap if not busy. */
 875	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 876		int swap_was_freed;
 877		unlock_cluster(ci);
 878		spin_unlock(&si->lock);
 879		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 880		spin_lock(&si->lock);
 881		/* entry was freed successfully, try to use this again */
 882		if (swap_was_freed)
 883			goto checks;
 884		goto scan; /* check next one */
 885	}
 886
 887	if (si->swap_map[offset]) {
 888		unlock_cluster(ci);
 889		if (!n_ret)
 890			goto scan;
 891		else
 892			goto done;
 893	}
 894	WRITE_ONCE(si->swap_map[offset], usage);
 895	inc_cluster_info_page(si, si->cluster_info, offset);
 896	unlock_cluster(ci);
 897
 898	swap_range_alloc(si, offset, 1);
 899	slots[n_ret++] = swp_entry(si->type, offset);
 900
 901	/* got enough slots or reach max slots? */
 902	if ((n_ret == nr) || (offset >= si->highest_bit))
 903		goto done;
 904
 905	/* search for next available slot */
 906
 907	/* time to take a break? */
 908	if (unlikely(--latency_ration < 0)) {
 909		if (n_ret)
 910			goto done;
 911		spin_unlock(&si->lock);
 912		cond_resched();
 913		spin_lock(&si->lock);
 914		latency_ration = LATENCY_LIMIT;
 915	}
 916
 917	/* try to get more slots in cluster */
 918	if (si->cluster_info) {
 919		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 920			goto checks;
 921	} else if (si->cluster_nr && !si->swap_map[++offset]) {
 922		/* non-ssd case, still more slots in cluster? */
 923		--si->cluster_nr;
 924		goto checks;
 925	}
 926
 927	/*
  928	 * Even if there are no free clusters available (fragmented),
 929	 * try to scan a little more quickly with lock held unless we
 930	 * have scanned too many slots already.
 931	 */
 932	if (!scanned_many) {
 933		unsigned long scan_limit;
 934
 935		if (offset < scan_base)
 936			scan_limit = scan_base;
 937		else
 938			scan_limit = si->highest_bit;
 939		for (; offset <= scan_limit && --latency_ration > 0;
 940		     offset++) {
 941			if (!si->swap_map[offset])
 942				goto checks;
 943		}
 944	}
 945
 946done:
 947	set_cluster_next(si, offset + 1);
 948	si->flags -= SWP_SCANNING;
 949	return n_ret;
 950
 951scan:
 952	spin_unlock(&si->lock);
 953	while (++offset <= READ_ONCE(si->highest_bit)) {
 954		if (data_race(!si->swap_map[offset])) {
 955			spin_lock(&si->lock);
 956			goto checks;
 957		}
 958		if (vm_swap_full() &&
 959		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
 960			spin_lock(&si->lock);
 961			goto checks;
 962		}
 963		if (unlikely(--latency_ration < 0)) {
 964			cond_resched();
 965			latency_ration = LATENCY_LIMIT;
 966			scanned_many = true;
  967		}
 968	}
 969	offset = si->lowest_bit;
 970	while (offset < scan_base) {
 971		if (data_race(!si->swap_map[offset])) {
 972			spin_lock(&si->lock);
 973			goto checks;
 974		}
 975		if (vm_swap_full() &&
 976		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
 977			spin_lock(&si->lock);
 978			goto checks;
 979		}
 980		if (unlikely(--latency_ration < 0)) {
 981			cond_resched();
 982			latency_ration = LATENCY_LIMIT;
 983			scanned_many = true;
  984		}
 985		offset++;
 986	}
 987	spin_lock(&si->lock);
 988
 989no_page:
 990	si->flags -= SWP_SCANNING;
 991	return n_ret;
 992}
 993
 994static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 995{
 996	unsigned long idx;
 997	struct swap_cluster_info *ci;
 998	unsigned long offset;
 999
1000	/*
1001	 * Should not even be attempting cluster allocations when huge
1002	 * page swap is disabled.  Warn and fail the allocation.
1003	 */
1004	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1005		VM_WARN_ON_ONCE(1);
1006		return 0;
1007	}
1008
1009	if (cluster_list_empty(&si->free_clusters))
1010		return 0;
1011
1012	idx = cluster_list_first(&si->free_clusters);
1013	offset = idx * SWAPFILE_CLUSTER;
1014	ci = lock_cluster(si, offset);
1015	alloc_cluster(si, idx);
1016	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1017
1018	memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1019	unlock_cluster(ci);
1020	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1021	*slot = swp_entry(si->type, offset);
1022
1023	return 1;
1024}
1025
1026static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1027{
1028	unsigned long offset = idx * SWAPFILE_CLUSTER;
1029	struct swap_cluster_info *ci;
1030
1031	ci = lock_cluster(si, offset);
1032	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1033	cluster_set_count_flag(ci, 0, 0);
1034	free_cluster(si, idx);
1035	unlock_cluster(ci);
1036	swap_range_free(si, offset, SWAPFILE_CLUSTER);
1037}
1038
1039int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1040{
1041	unsigned long size = swap_entry_size(entry_size);
1042	struct swap_info_struct *si, *next;
1043	long avail_pgs;
1044	int n_ret = 0;
1045	int node;
1046
1047	/* Only single cluster request supported */
1048	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1049
1050	spin_lock(&swap_avail_lock);
1051
1052	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1053	if (avail_pgs <= 0) {
1054		spin_unlock(&swap_avail_lock);
1055		goto noswap;
1056	}
1057
1058	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1059
1060	atomic_long_sub(n_goal * size, &nr_swap_pages);
1061
1062start_over:
1063	node = numa_node_id();
1064	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1065		/* requeue si to after same-priority siblings */
1066		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1067		spin_unlock(&swap_avail_lock);
1068		spin_lock(&si->lock);
1069		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1070			spin_lock(&swap_avail_lock);
1071			if (plist_node_empty(&si->avail_lists[node])) {
1072				spin_unlock(&si->lock);
1073				goto nextsi;
1074			}
1075			WARN(!si->highest_bit,
1076			     "swap_info %d in list but !highest_bit\n",
1077			     si->type);
1078			WARN(!(si->flags & SWP_WRITEOK),
1079			     "swap_info %d in list but !SWP_WRITEOK\n",
1080			     si->type);
1081			__del_from_avail_list(si);
1082			spin_unlock(&si->lock);
1083			goto nextsi;
1084		}
1085		if (size == SWAPFILE_CLUSTER) {
1086			if (si->flags & SWP_BLKDEV)
1087				n_ret = swap_alloc_cluster(si, swp_entries);
1088		} else
1089			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1090						    n_goal, swp_entries);
1091		spin_unlock(&si->lock);
1092		if (n_ret || size == SWAPFILE_CLUSTER)
1093			goto check_out;
1094		pr_debug("scan_swap_map of si %d failed to find offset\n",
 1095			si->type);
1096
1097		spin_lock(&swap_avail_lock);
1098nextsi:
1099		/*
1100		 * if we got here, it's likely that si was almost full before,
1101		 * and since scan_swap_map_slots() can drop the si->lock,
1102		 * multiple callers probably all tried to get a page from the
1103		 * same si and it filled up before we could get one; or, the si
1104		 * filled up between us dropping swap_avail_lock and taking
1105		 * si->lock. Since we dropped the swap_avail_lock, the
1106		 * swap_avail_head list may have been modified; so if next is
1107		 * still in the swap_avail_head list then try it, otherwise
1108		 * start over if we have not gotten any slots.
1109		 */
1110		if (plist_node_empty(&next->avail_lists[node]))
1111			goto start_over;
1112	}
1113
1114	spin_unlock(&swap_avail_lock);
1115
1116check_out:
1117	if (n_ret < n_goal)
1118		atomic_long_add((long)(n_goal - n_ret) * size,
1119				&nr_swap_pages);
1120noswap:
1121	return n_ret;
1122}
1123
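/*
 * Minimal standalone sketch of the nr_swap_pages accounting performed by
 * get_swap_pages() above: the whole goal is debited up front (reserving the
 * slots against concurrent allocators) and any shortfall is credited back at
 * check_out/noswap.  Plain arithmetic only; the starting numbers are made up
 * for illustration.
 */
#include <stdio.h>

int main(void)
{
	long nr_swap_pages = 1000;	/* free swap slots (assumed) */
	long size = 1;			/* order-0 entries */
	long n_goal = 64, n_ret = 10;	/* asked for 64, only 10 allocated */

	nr_swap_pages -= n_goal * size;			/* debit up front */
	nr_swap_pages += (n_goal - n_ret) * size;	/* credit the shortfall */
	printf("%ld\n", nr_swap_pages);			/* -> 990 */
	return 0;
}
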
1124static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
1125{
1126	struct swap_info_struct *p;
1127	unsigned long offset;
1128
1129	if (!entry.val)
1130		goto out;
1131	p = swp_swap_info(entry);
1132	if (!p)
1133		goto bad_nofile;
1134	if (data_race(!(p->flags & SWP_USED)))
1135		goto bad_device;
1136	offset = swp_offset(entry);
1137	if (offset >= p->max)
 1138		goto bad_offset;
1139	return p;
 1140
1141bad_offset:
1142	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1143	goto out;
1144bad_device:
1145	pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1146	goto out;
1147bad_nofile:
1148	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1149out:
1150	return NULL;
1151}
1152
1153static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1154{
1155	struct swap_info_struct *p;
1156
1157	p = __swap_info_get(entry);
1158	if (!p)
1159		goto out;
1160	if (data_race(!p->swap_map[swp_offset(entry)]))
1161		goto bad_free;
1162	return p;
1163
1164bad_free:
1165	pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1166out:
1167	return NULL;
1168}
1169
1170static struct swap_info_struct *swap_info_get(swp_entry_t entry)
1171{
1172	struct swap_info_struct *p;
1173
1174	p = _swap_info_get(entry);
1175	if (p)
1176		spin_lock(&p->lock);
1177	return p;
1178}
1179
1180static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1181					struct swap_info_struct *q)
1182{
1183	struct swap_info_struct *p;
1184
1185	p = _swap_info_get(entry);
1186
1187	if (p != q) {
1188		if (q != NULL)
1189			spin_unlock(&q->lock);
1190		if (p != NULL)
1191			spin_lock(&p->lock);
1192	}
1193	return p;
1194}
1195
1196static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1197					      unsigned long offset,
1198					      unsigned char usage)
1199{
1200	unsigned char count;
1201	unsigned char has_cache;
1202
1203	count = p->swap_map[offset];
1204
1205	has_cache = count & SWAP_HAS_CACHE;
1206	count &= ~SWAP_HAS_CACHE;
1207
1208	if (usage == SWAP_HAS_CACHE) {
1209		VM_BUG_ON(!has_cache);
1210		has_cache = 0;
1211	} else if (count == SWAP_MAP_SHMEM) {
1212		/*
1213		 * Or we could insist on shmem.c using a special
1214		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1215		 */
1216		count = 0;
1217	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1218		if (count == COUNT_CONTINUED) {
1219			if (swap_count_continued(p, offset, count))
1220				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1221			else
1222				count = SWAP_MAP_MAX;
1223		} else
1224			count--;
1225	}
1226
1227	usage = count | has_cache;
1228	if (usage)
1229		WRITE_ONCE(p->swap_map[offset], usage);
1230	else
1231		WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1232
1233	return usage;
1234}
1235
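/*
 * Minimal standalone model of __swap_entry_free_locked() above, covering
 * only the common cases: no SWAP_MAP_SHMEM and no COUNT_CONTINUED handling.
 * It also omits one detail of the real function: when the combined usage
 * reaches zero, the kernel writes SWAP_HAS_CACHE back into swap_map so that
 * free_swap_slot() can finish freeing the slot later.  The bit value below
 * is an assumption restated from include/linux/swap.h for this series.
 */
#include <stdio.h>

#define EX_SWAP_HAS_CACHE	0x40	/* assumed value */

/* Drop one map reference (usage == 1) or the swap-cache pin (usage == 0x40). */
static unsigned char ex_entry_free(unsigned char map, unsigned char usage)
{
	unsigned char has_cache = map & EX_SWAP_HAS_CACHE;
	unsigned char count = map & ~EX_SWAP_HAS_CACHE;

	if (usage == EX_SWAP_HAS_CACHE)
		has_cache = 0;		/* dropping the swap-cache reference */
	else if (count)
		count--;		/* dropping one map reference */

	return count | has_cache;	/* new swap_map byte (0 => slot free) */
}

int main(void)
{
	unsigned char map = EX_SWAP_HAS_CACHE | 2;	/* cached + 2 refs */

	map = ex_entry_free(map, 1);			/* -> 0x41 */
	map = ex_entry_free(map, 1);			/* -> 0x40 */
	map = ex_entry_free(map, EX_SWAP_HAS_CACHE);	/* -> 0x00, slot free */
	printf("0x%02x\n", (unsigned)map);
	return 0;
}
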
1236/*
 1237 * Check whether the swap entry is valid in the swap device.  If so,
 1238 * return a pointer to the swap_info_struct, and keep the swap entry valid
 1239 * by preventing the swap device from being swapped off, until
 1240 * put_swap_device() is called.  Otherwise return NULL.
1241 *
1242 * Notice that swapoff or swapoff+swapon can still happen before the
1243 * percpu_ref_tryget_live() in get_swap_device() or after the
1244 * percpu_ref_put() in put_swap_device() if there isn't any other way
1245 * to prevent swapoff, such as page lock, page table lock, etc.  The
1246 * caller must be prepared for that.  For example, the following
1247 * situation is possible.
1248 *
1249 *   CPU1				CPU2
1250 *   do_swap_page()
1251 *     ...				swapoff+swapon
1252 *     __read_swap_cache_async()
1253 *       swapcache_prepare()
1254 *         __swap_duplicate()
1255 *           // check swap_map
1256 *     // verify PTE not changed
1257 *
 1258 * In __swap_duplicate(), the swap_map needs to be checked before
 1259 * changing it, partly because the specified swap entry may be for another
 1260 * swap device which has been swapped off.  And in do_swap_page(), after
 1261 * the page is read from the swap device, the PTE is verified (with the
 1262 * page table locked) not to have changed, to check whether the swap
 1263 * device has been swapped off or swapped off and on again.
1264 */
1265struct swap_info_struct *get_swap_device(swp_entry_t entry)
1266{
1267	struct swap_info_struct *si;
1268	unsigned long offset;
1269
1270	if (!entry.val)
1271		goto out;
1272	si = swp_swap_info(entry);
1273	if (!si)
1274		goto bad_nofile;
1275	if (!percpu_ref_tryget_live(&si->users))
1276		goto out;
1277	/*
1278	 * Guarantee the si->users are checked before accessing other
1279	 * fields of swap_info_struct.
1280	 *
1281	 * Paired with the spin_unlock() after setup_swap_info() in
1282	 * enable_swap_info().
1283	 */
1284	smp_rmb();
1285	offset = swp_offset(entry);
1286	if (offset >= si->max)
1287		goto put_out;
1288
1289	return si;
1290bad_nofile:
1291	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1292out:
1293	return NULL;
 1294put_out:
1295	percpu_ref_put(&si->users);
1296	return NULL;
1297}
1298
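/*
 * A hedged usage sketch of the bracketing pattern described in the comment
 * above; it is not compiled as part of this file.  Only
 * get_swap_device()/put_swap_device() and swp_offset() are real interfaces
 * used elsewhere in this file (see __swap_count() below); the function name
 * example_peek_swap_count is a hypothetical placeholder.
 */
static bool example_peek_swap_count(swp_entry_t entry, unsigned char *count)
{
	struct swap_info_struct *si;

	si = get_swap_device(entry);	/* pins the device, or returns NULL */
	if (!si)
		return false;		/* stale entry, or a swapoff race */
	*count = si->swap_map[swp_offset(entry)];	/* safe while pinned */
	put_swap_device(si);		/* drop the pin */
	return true;
}
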
1299static unsigned char __swap_entry_free(struct swap_info_struct *p,
1300				       swp_entry_t entry)
1301{
1302	struct swap_cluster_info *ci;
1303	unsigned long offset = swp_offset(entry);
1304	unsigned char usage;
1305
1306	ci = lock_cluster_or_swap_info(p, offset);
1307	usage = __swap_entry_free_locked(p, offset, 1);
1308	unlock_cluster_or_swap_info(p, ci);
1309	if (!usage)
1310		free_swap_slot(entry);
1311
1312	return usage;
1313}
1314
1315static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1316{
1317	struct swap_cluster_info *ci;
1318	unsigned long offset = swp_offset(entry);
1319	unsigned char count;
1320
1321	ci = lock_cluster(p, offset);
1322	count = p->swap_map[offset];
1323	VM_BUG_ON(count != SWAP_HAS_CACHE);
1324	p->swap_map[offset] = 0;
1325	dec_cluster_info_page(p, p->cluster_info, offset);
1326	unlock_cluster(ci);
1327
1328	mem_cgroup_uncharge_swap(entry, 1);
1329	swap_range_free(p, offset, 1);
1330}
1331
1332/*
1333 * Caller has made sure that the swap device corresponding to entry
1334 * is still around or has not been recycled.
1335 */
1336void swap_free(swp_entry_t entry)
1337{
1338	struct swap_info_struct *p;
1339
1340	p = _swap_info_get(entry);
1341	if (p)
1342		__swap_entry_free(p, entry);
1343}
1344
1345/*
1346 * Called after dropping swapcache to decrease refcnt to swap entries.
1347 */
1348void put_swap_page(struct page *page, swp_entry_t entry)
1349{
1350	unsigned long offset = swp_offset(entry);
1351	unsigned long idx = offset / SWAPFILE_CLUSTER;
1352	struct swap_cluster_info *ci;
1353	struct swap_info_struct *si;
1354	unsigned char *map;
1355	unsigned int i, free_entries = 0;
1356	unsigned char val;
1357	int size = swap_entry_size(thp_nr_pages(page));
1358
1359	si = _swap_info_get(entry);
1360	if (!si)
1361		return;
1362
1363	ci = lock_cluster_or_swap_info(si, offset);
1364	if (size == SWAPFILE_CLUSTER) {
1365		VM_BUG_ON(!cluster_is_huge(ci));
1366		map = si->swap_map + offset;
1367		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1368			val = map[i];
1369			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1370			if (val == SWAP_HAS_CACHE)
1371				free_entries++;
1372		}
1373		cluster_clear_huge(ci);
1374		if (free_entries == SWAPFILE_CLUSTER) {
1375			unlock_cluster_or_swap_info(si, ci);
1376			spin_lock(&si->lock);
1377			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1378			swap_free_cluster(si, idx);
1379			spin_unlock(&si->lock);
1380			return;
1381		}
1382	}
1383	for (i = 0; i < size; i++, entry.val++) {
1384		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1385			unlock_cluster_or_swap_info(si, ci);
1386			free_swap_slot(entry);
1387			if (i == size - 1)
1388				return;
1389			lock_cluster_or_swap_info(si, offset);
1390		}
1391	}
1392	unlock_cluster_or_swap_info(si, ci);
1393}
1394
1395#ifdef CONFIG_THP_SWAP
1396int split_swap_cluster(swp_entry_t entry)
1397{
1398	struct swap_info_struct *si;
1399	struct swap_cluster_info *ci;
1400	unsigned long offset = swp_offset(entry);
1401
1402	si = _swap_info_get(entry);
1403	if (!si)
1404		return -EBUSY;
1405	ci = lock_cluster(si, offset);
1406	cluster_clear_huge(ci);
1407	unlock_cluster(ci);
1408	return 0;
1409}
1410#endif
1411
1412static int swp_entry_cmp(const void *ent1, const void *ent2)
1413{
1414	const swp_entry_t *e1 = ent1, *e2 = ent2;
1415
1416	return (int)swp_type(*e1) - (int)swp_type(*e2);
1417}
1418
1419void swapcache_free_entries(swp_entry_t *entries, int n)
1420{
1421	struct swap_info_struct *p, *prev;
1422	int i;
1423
1424	if (n <= 0)
1425		return;
1426
1427	prev = NULL;
1428	p = NULL;
1429
1430	/*
1431	 * Sort swap entries by swap device, so each lock is only taken once.
1432	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1433	 * so low that it isn't necessary to optimize further.
1434	 */
1435	if (nr_swapfiles > 1)
1436		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1437	for (i = 0; i < n; ++i) {
1438		p = swap_info_get_cont(entries[i], prev);
1439		if (p)
1440			swap_entry_free(p, entries[i]);
1441		prev = p;
1442	}
1443	if (p)
1444		spin_unlock(&p->lock);
1445}
1446
1447/*
1448 * How many references to page are currently swapped out?
1449 * This does not give an exact answer when swap count is continued,
1450 * but does include the high COUNT_CONTINUED flag to allow for that.
1451 */
1452int page_swapcount(struct page *page)
1453{
1454	int count = 0;
1455	struct swap_info_struct *p;
1456	struct swap_cluster_info *ci;
1457	swp_entry_t entry;
1458	unsigned long offset;
1459
1460	entry.val = page_private(page);
1461	p = _swap_info_get(entry);
1462	if (p) {
1463		offset = swp_offset(entry);
1464		ci = lock_cluster_or_swap_info(p, offset);
1465		count = swap_count(p->swap_map[offset]);
1466		unlock_cluster_or_swap_info(p, ci);
1467	}
1468	return count;
1469}
1470
1471int __swap_count(swp_entry_t entry)
1472{
1473	struct swap_info_struct *si;
1474	pgoff_t offset = swp_offset(entry);
1475	int count = 0;
1476
1477	si = get_swap_device(entry);
1478	if (si) {
1479		count = swap_count(si->swap_map[offset]);
1480		put_swap_device(si);
1481	}
1482	return count;
1483}
 1484
1485static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1486{
1487	int count = 0;
1488	pgoff_t offset = swp_offset(entry);
 1489	struct swap_cluster_info *ci;
1490
1491	ci = lock_cluster_or_swap_info(si, offset);
1492	count = swap_count(si->swap_map[offset]);
1493	unlock_cluster_or_swap_info(si, ci);
1494	return count;
1495}
1496
1497/*
1498 * How many references to @entry are currently swapped out?
1499 * This does not give an exact answer when swap count is continued,
1500 * but does include the high COUNT_CONTINUED flag to allow for that.
1501 */
1502int __swp_swapcount(swp_entry_t entry)
1503{
1504	int count = 0;
1505	struct swap_info_struct *si;
1506
1507	si = get_swap_device(entry);
1508	if (si) {
1509		count = swap_swapcount(si, entry);
1510		put_swap_device(si);
1511	}
1512	return count;
1513}
1514
1515/*
1516 * How many references to @entry are currently swapped out?
1517 * This considers COUNT_CONTINUED so it returns exact answer.
1518 */
1519int swp_swapcount(swp_entry_t entry)
1520{
1521	int count, tmp_count, n;
1522	struct swap_info_struct *p;
1523	struct swap_cluster_info *ci;
1524	struct page *page;
1525	pgoff_t offset;
1526	unsigned char *map;
1527
1528	p = _swap_info_get(entry);
1529	if (!p)
1530		return 0;
1531
1532	offset = swp_offset(entry);
1533
1534	ci = lock_cluster_or_swap_info(p, offset);
1535
1536	count = swap_count(p->swap_map[offset]);
1537	if (!(count & COUNT_CONTINUED))
1538		goto out;
1539
1540	count &= ~COUNT_CONTINUED;
1541	n = SWAP_MAP_MAX + 1;
1542
1543	page = vmalloc_to_page(p->swap_map + offset);
1544	offset &= ~PAGE_MASK;
1545	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1546
1547	do {
1548		page = list_next_entry(page, lru);
1549		map = kmap_atomic(page);
1550		tmp_count = map[offset];
1551		kunmap_atomic(map);
1552
1553		count += (tmp_count & ~COUNT_CONTINUED) * n;
1554		n *= (SWAP_CONT_MAX + 1);
1555	} while (tmp_count & COUNT_CONTINUED);
1556out:
1557	unlock_cluster_or_swap_info(p, ci);
1558	return count;
1559}
1560
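/*
 * Minimal standalone sketch of the mixed-radix arithmetic that
 * swp_swapcount() above performs once a count has overflowed into
 * continuation pages: the first digit lives in swap_map (radix
 * SWAP_MAP_MAX + 1) and the following digits live in continuation pages
 * (radix SWAP_CONT_MAX + 1).  The values below are assumptions restated
 * from include/linux/swap.h for this series.
 */
#include <stdio.h>

#define EX_SWAP_MAP_MAX		0x3e	/* assumed */
#define EX_SWAP_CONT_MAX	0x7f	/* assumed */
#define EX_COUNT_CONTINUED	0x80	/* assumed */

/* digits[0] is the swap_map count, digits[1..ndigits-1] the continuations. */
static long ex_full_swapcount(const unsigned char *digits, int ndigits)
{
	long count = digits[0] & ~EX_COUNT_CONTINUED;
	long n = EX_SWAP_MAP_MAX + 1;
	int i;

	for (i = 1; i < ndigits; i++) {
		count += (long)(digits[i] & ~EX_COUNT_CONTINUED) * n;
		n *= EX_SWAP_CONT_MAX + 1;
	}
	return count;
}

int main(void)
{
	/* swap_map holds COUNT_CONTINUED|SWAP_MAP_MAX, one continuation of 2 */
	unsigned char digits[] = { EX_COUNT_CONTINUED | EX_SWAP_MAP_MAX, 2 };

	printf("%ld\n", ex_full_swapcount(digits, 2));	/* -> 62 + 2*63 = 188 */
	return 0;
}
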
1561static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1562					 swp_entry_t entry)
1563{
1564	struct swap_cluster_info *ci;
1565	unsigned char *map = si->swap_map;
1566	unsigned long roffset = swp_offset(entry);
1567	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1568	int i;
1569	bool ret = false;
1570
1571	ci = lock_cluster_or_swap_info(si, offset);
1572	if (!ci || !cluster_is_huge(ci)) {
1573		if (swap_count(map[roffset]))
1574			ret = true;
1575		goto unlock_out;
1576	}
1577	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1578		if (swap_count(map[offset + i])) {
1579			ret = true;
1580			break;
1581		}
1582	}
1583unlock_out:
1584	unlock_cluster_or_swap_info(si, ci);
1585	return ret;
1586}
1587
1588static bool page_swapped(struct page *page)
1589{
1590	swp_entry_t entry;
1591	struct swap_info_struct *si;
1592
1593	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1594		return page_swapcount(page) != 0;
1595
1596	page = compound_head(page);
1597	entry.val = page_private(page);
1598	si = _swap_info_get(entry);
1599	if (si)
1600		return swap_page_trans_huge_swapped(si, entry);
1601	return false;
1602}
1603
1604static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1605					 int *total_swapcount)
1606{
1607	int i, map_swapcount, _total_mapcount, _total_swapcount;
1608	unsigned long offset = 0;
1609	struct swap_info_struct *si;
1610	struct swap_cluster_info *ci = NULL;
1611	unsigned char *map = NULL;
1612	int mapcount, swapcount = 0;
1613
1614	/* hugetlbfs shouldn't call it */
1615	VM_BUG_ON_PAGE(PageHuge(page), page);
1616
1617	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1618		mapcount = page_trans_huge_mapcount(page, total_mapcount);
1619		if (PageSwapCache(page))
1620			swapcount = page_swapcount(page);
1621		if (total_swapcount)
1622			*total_swapcount = swapcount;
1623		return mapcount + swapcount;
1624	}
1625
1626	page = compound_head(page);
1627
1628	_total_mapcount = _total_swapcount = map_swapcount = 0;
1629	if (PageSwapCache(page)) {
1630		swp_entry_t entry;
1631
1632		entry.val = page_private(page);
1633		si = _swap_info_get(entry);
1634		if (si) {
1635			map = si->swap_map;
1636			offset = swp_offset(entry);
1637		}
1638	}
1639	if (map)
1640		ci = lock_cluster(si, offset);
1641	for (i = 0; i < HPAGE_PMD_NR; i++) {
1642		mapcount = atomic_read(&page[i]._mapcount) + 1;
1643		_total_mapcount += mapcount;
1644		if (map) {
1645			swapcount = swap_count(map[offset + i]);
1646			_total_swapcount += swapcount;
1647		}
1648		map_swapcount = max(map_swapcount, mapcount + swapcount);
1649	}
1650	unlock_cluster(ci);
1651	if (PageDoubleMap(page)) {
1652		map_swapcount -= 1;
1653		_total_mapcount -= HPAGE_PMD_NR;
1654	}
1655	mapcount = compound_mapcount(page);
1656	map_swapcount += mapcount;
1657	_total_mapcount += mapcount;
1658	if (total_mapcount)
1659		*total_mapcount = _total_mapcount;
1660	if (total_swapcount)
1661		*total_swapcount = _total_swapcount;
1662
1663	return map_swapcount;
1664}
1665
1666/*
1667 * We can write to an anon page without COW if there are no other references
1668 * to it.  And as a side-effect, free up its swap: because the old content
1669 * on disk will never be read, and seeking back there to write new content
1670 * later would only waste time away from clustering.
1671 *
1672 * NOTE: total_map_swapcount should not be relied upon by the caller if
 1673 * reuse_swap_page() returns false, but it may always be overwritten
 1674 * (see the other implementation for CONFIG_SWAP=n).
1675 */
1676bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1677{
1678	int count, total_mapcount, total_swapcount;
1679
1680	VM_BUG_ON_PAGE(!PageLocked(page), page);
 1681	if (unlikely(PageKsm(page)))
1682		return false;
1683	count = page_trans_huge_map_swapcount(page, &total_mapcount,
1684					      &total_swapcount);
1685	if (total_map_swapcount)
1686		*total_map_swapcount = total_mapcount + total_swapcount;
1687	if (count == 1 && PageSwapCache(page) &&
1688	    (likely(!PageTransCompound(page)) ||
1689	     /* The remaining swap count will be freed soon */
1690	     total_swapcount == page_swapcount(page))) {
1691		if (!PageWriteback(page)) {
1692			page = compound_head(page);
1693			delete_from_swap_cache(page);
1694			SetPageDirty(page);
1695		} else {
1696			swp_entry_t entry;
1697			struct swap_info_struct *p;
1698
1699			entry.val = page_private(page);
1700			p = swap_info_get(entry);
1701			if (p->flags & SWP_STABLE_WRITES) {
1702				spin_unlock(&p->lock);
1703				return false;
1704			}
1705			spin_unlock(&p->lock);
1706		}
1707	}
1708
1709	return count <= 1;
1710}
1711
1712/*
1713 * If swap is getting full, or if there are no more mappings of this page,
1714 * then try_to_free_swap is called to free its swap space.
1715 */
1716int try_to_free_swap(struct page *page)
1717{
1718	VM_BUG_ON_PAGE(!PageLocked(page), page);
1719
1720	if (!PageSwapCache(page))
1721		return 0;
1722	if (PageWriteback(page))
1723		return 0;
1724	if (page_swapped(page))
1725		return 0;
1726
1727	/*
1728	 * Once hibernation has begun to create its image of memory,
1729	 * there's a danger that one of the calls to try_to_free_swap()
1730	 * - most probably a call from __try_to_reclaim_swap() while
1731	 * hibernation is allocating its own swap pages for the image,
1732	 * but conceivably even a call from memory reclaim - will free
1733	 * the swap from a page which has already been recorded in the
1734	 * image as a clean swapcache page, and then reuse its swap for
1735	 * another page of the image.  On waking from hibernation, the
1736	 * original page might be freed under memory pressure, then
1737	 * later read back in from swap, now with the wrong data.
1738	 *
1739	 * Hibernation suspends storage while it is writing the image
1740	 * to disk so check that here.
1741	 */
1742	if (pm_suspended_storage())
1743		return 0;
1744
1745	page = compound_head(page);
1746	delete_from_swap_cache(page);
1747	SetPageDirty(page);
1748	return 1;
1749}
1750
1751/*
1752 * Free the swap entry like above, but also try to
1753 * free the page cache entry if it is the last user.
1754 */
1755int free_swap_and_cache(swp_entry_t entry)
1756{
1757	struct swap_info_struct *p;
1758	unsigned char count;
1759
1760	if (non_swap_entry(entry))
1761		return 1;
1762
1763	p = _swap_info_get(entry);
1764	if (p) {
1765		count = __swap_entry_free(p, entry);
1766		if (count == SWAP_HAS_CACHE &&
1767		    !swap_page_trans_huge_swapped(p, entry))
1768			__try_to_reclaim_swap(p, swp_offset(entry),
1769					      TTRS_UNMAPPED | TTRS_FULL);
1770	}
1771	return p != NULL;
1772}
1773
1774#ifdef CONFIG_HIBERNATION
1775
1776swp_entry_t get_swap_page_of_type(int type)
1777{
1778	struct swap_info_struct *si = swap_type_to_swap_info(type);
1779	swp_entry_t entry = {0};
1780
1781	if (!si)
1782		goto fail;
1783
1784	/* This is called for allocating swap entry, not cache */
1785	spin_lock(&si->lock);
1786	if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1787		atomic_long_dec(&nr_swap_pages);
1788	spin_unlock(&si->lock);
1789fail:
1790	return entry;
1791}
1792
1793/*
1794 * Find the swap type that corresponds to given device (if any).
1795 *
1796 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1797 * from 0, in which the swap header is expected to be located.
1798 *
1799 * This is needed for the suspend to disk (aka swsusp).
1800 */
1801int swap_type_of(dev_t device, sector_t offset)
1802{
1803	int type;
1804
1805	if (!device)
1806		return -1;
1807
1808	spin_lock(&swap_lock);
1809	for (type = 0; type < nr_swapfiles; type++) {
1810		struct swap_info_struct *sis = swap_info[type];
1811
1812		if (!(sis->flags & SWP_WRITEOK))
1813			continue;
1814
1815		if (device == sis->bdev->bd_dev) {
1816			struct swap_extent *se = first_se(sis);
1817
1818			if (se->start_block == offset) {
1819				spin_unlock(&swap_lock);
1820				return type;
1821			}
1822		}
1823	}
1824	spin_unlock(&swap_lock);
1825	return -ENODEV;
1826}
1827
1828int find_first_swap(dev_t *device)
1829{
1830	int type;
1831
1832	spin_lock(&swap_lock);
1833	for (type = 0; type < nr_swapfiles; type++) {
1834		struct swap_info_struct *sis = swap_info[type];
1835
1836		if (!(sis->flags & SWP_WRITEOK))
1837			continue;
1838		*device = sis->bdev->bd_dev;
1839		spin_unlock(&swap_lock);
1840		return type;
1841	}
1842	spin_unlock(&swap_lock);
1843	return -ENODEV;
1844}
1845
1846/*
1847 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1848 * corresponding to given index in swap_info (swap type).
1849 */
1850sector_t swapdev_block(int type, pgoff_t offset)
1851{
1852	struct swap_info_struct *si = swap_type_to_swap_info(type);
1853	struct swap_extent *se;
1854
1855	if (!si || !(si->flags & SWP_WRITEOK))
1856		return 0;
1857	se = offset_to_swap_extent(si, offset);
1858	return se->start_block + (offset - se->start_page);
1859}
1860
1861/*
1862 * Return either the total number of swap pages of given type, or the number
1863 * of free pages of that type (depending on @free)
1864 *
1865 * This is needed for software suspend
1866 */
1867unsigned int count_swap_pages(int type, int free)
1868{
1869	unsigned int n = 0;
1870
1871	spin_lock(&swap_lock);
1872	if ((unsigned int)type < nr_swapfiles) {
1873		struct swap_info_struct *sis = swap_info[type];
1874
1875		spin_lock(&sis->lock);
1876		if (sis->flags & SWP_WRITEOK) {
1877			n = sis->pages;
1878			if (free)
1879				n -= sis->inuse_pages;
1880		}
1881		spin_unlock(&sis->lock);
1882	}
1883	spin_unlock(&swap_lock);
1884	return n;
1885}
1886#endif /* CONFIG_HIBERNATION */
1887
1888static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1889{
1890	return pte_same(pte_swp_clear_flags(pte), swp_pte);
1891}
1892
1893/*
1894 * No need to decide whether this PTE shares the swap entry with others,
1895 * just let do_wp_page work it out if a write is requested later - to
1896 * force COW, vm_page_prot omits write permission from any private vma.
1897 */
1898static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1899		unsigned long addr, swp_entry_t entry, struct page *page)
1900{
1901	struct page *swapcache;
1902	spinlock_t *ptl;
1903	pte_t *pte;
1904	int ret = 1;
1905
1906	swapcache = page;
1907	page = ksm_might_need_to_copy(page, vma, addr);
1908	if (unlikely(!page))
1909		return -ENOMEM;
1910
1911	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1912	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1913		ret = 0;
1914		goto out;
1915	}
1916
1917	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1918	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1919	get_page(page);
1920	set_pte_at(vma->vm_mm, addr, pte,
1921		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
1922	if (page == swapcache) {
1923		page_add_anon_rmap(page, vma, addr, false);
1924	} else { /* ksm created a completely new copy */
1925		page_add_new_anon_rmap(page, vma, addr, false);
1926		lru_cache_add_inactive_or_unevictable(page, vma);
1927	}
1928	swap_free(entry);
1929out:
1930	pte_unmap_unlock(pte, ptl);
1931	if (page != swapcache) {
1932		unlock_page(page);
1933		put_page(page);
1934	}
1935	return ret;
1936}
1937
1938static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1939			unsigned long addr, unsigned long end,
1940			unsigned int type, bool frontswap,
1941			unsigned long *fs_pages_to_unuse)
1942{
1943	struct page *page;
1944	swp_entry_t entry;
1945	pte_t *pte;
1946	struct swap_info_struct *si;
1947	unsigned long offset;
1948	int ret = 0;
1949	volatile unsigned char *swap_map;
1950
1951	si = swap_info[type];
1952	pte = pte_offset_map(pmd, addr);
1953	do {
1954		if (!is_swap_pte(*pte))
1955			continue;
1956
1957		entry = pte_to_swp_entry(*pte);
1958		if (swp_type(entry) != type)
1959			continue;
1960
1961		offset = swp_offset(entry);
1962		if (frontswap && !frontswap_test(si, offset))
1963			continue;
1964
1965		pte_unmap(pte);
1966		swap_map = &si->swap_map[offset];
1967		page = lookup_swap_cache(entry, vma, addr);
1968		if (!page) {
1969			struct vm_fault vmf = {
1970				.vma = vma,
1971				.address = addr,
1972				.pmd = pmd,
1973			};
1974
1975			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1976						&vmf);
1977		}
1978		if (!page) {
1979			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1980				goto try_next;
1981			return -ENOMEM;
1982		}
1983
1984		lock_page(page);
1985		wait_on_page_writeback(page);
1986		ret = unuse_pte(vma, pmd, addr, entry, page);
1987		if (ret < 0) {
1988			unlock_page(page);
1989			put_page(page);
1990			goto out;
1991		}
1992
1993		try_to_free_swap(page);
1994		unlock_page(page);
1995		put_page(page);
1996
1997		if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
1998			ret = FRONTSWAP_PAGES_UNUSED;
1999			goto out;
2000		}
2001try_next:
2002		pte = pte_offset_map(pmd, addr);
2003	} while (pte++, addr += PAGE_SIZE, addr != end);
2004	pte_unmap(pte - 1);
2005
2006	ret = 0;
2007out:
2008	return ret;
2009}
2010
2011static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2012				unsigned long addr, unsigned long end,
2013				unsigned int type, bool frontswap,
2014				unsigned long *fs_pages_to_unuse)
2015{
2016	pmd_t *pmd;
2017	unsigned long next;
2018	int ret;
2019
2020	pmd = pmd_offset(pud, addr);
2021	do {
2022		cond_resched();
2023		next = pmd_addr_end(addr, end);
2024		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
2025			continue;
2026		ret = unuse_pte_range(vma, pmd, addr, next, type,
2027				      frontswap, fs_pages_to_unuse);
2028		if (ret)
2029			return ret;
2030	} while (pmd++, addr = next, addr != end);
2031	return 0;
2032}
2033
2034static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2035				unsigned long addr, unsigned long end,
2036				unsigned int type, bool frontswap,
2037				unsigned long *fs_pages_to_unuse)
2038{
2039	pud_t *pud;
2040	unsigned long next;
2041	int ret;
2042
2043	pud = pud_offset(p4d, addr);
2044	do {
2045		next = pud_addr_end(addr, end);
2046		if (pud_none_or_clear_bad(pud))
2047			continue;
2048		ret = unuse_pmd_range(vma, pud, addr, next, type,
2049				      frontswap, fs_pages_to_unuse);
2050		if (ret)
2051			return ret;
2052	} while (pud++, addr = next, addr != end);
2053	return 0;
2054}
2055
2056static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2057				unsigned long addr, unsigned long end,
2058				unsigned int type, bool frontswap,
2059				unsigned long *fs_pages_to_unuse)
2060{
2061	p4d_t *p4d;
2062	unsigned long next;
2063	int ret;
2064
2065	p4d = p4d_offset(pgd, addr);
2066	do {
2067		next = p4d_addr_end(addr, end);
2068		if (p4d_none_or_clear_bad(p4d))
2069			continue;
2070		ret = unuse_pud_range(vma, p4d, addr, next, type,
2071				      frontswap, fs_pages_to_unuse);
2072		if (ret)
2073			return ret;
2074	} while (p4d++, addr = next, addr != end);
2075	return 0;
2076}
2077
2078static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
2079		     bool frontswap, unsigned long *fs_pages_to_unuse)
2080{
2081	pgd_t *pgd;
2082	unsigned long addr, end, next;
2083	int ret;
2084
2085	addr = vma->vm_start;
2086	end = vma->vm_end;
2087
2088	pgd = pgd_offset(vma->vm_mm, addr);
2089	do {
2090		next = pgd_addr_end(addr, end);
2091		if (pgd_none_or_clear_bad(pgd))
2092			continue;
2093		ret = unuse_p4d_range(vma, pgd, addr, next, type,
2094				      frontswap, fs_pages_to_unuse);
2095		if (ret)
2096			return ret;
2097	} while (pgd++, addr = next, addr != end);
2098	return 0;
2099}
2100
2101static int unuse_mm(struct mm_struct *mm, unsigned int type,
2102		    bool frontswap, unsigned long *fs_pages_to_unuse)
2103{
2104	struct vm_area_struct *vma;
2105	int ret = 0;
2106
2107	mmap_read_lock(mm);
2108	for (vma = mm->mmap; vma; vma = vma->vm_next) {
2109		if (vma->anon_vma) {
2110			ret = unuse_vma(vma, type, frontswap,
2111					fs_pages_to_unuse);
2112			if (ret)
2113				break;
2114		}
2115		cond_resched();
2116	}
2117	mmap_read_unlock(mm);
2118	return ret;
2119}
2120
2121/*
2122 * Scan swap_map (or frontswap_map if frontswap parameter is true)
2123 * from current position to next entry still in use. Return 0
2124 * if there are no inuse entries after prev till end of the map.
2125 */
2126static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2127					unsigned int prev, bool frontswap)
2128{
2129	unsigned int i;
2130	unsigned char count;
2131
2132	/*
2133	 * No need for swap_lock here: we're just looking
2134	 * for whether an entry is in use, not modifying it; false
2135	 * hits are okay, and sys_swapoff() has already prevented new
2136	 * allocations from this area (while holding swap_lock).
2137	 */
2138	for (i = prev + 1; i < si->max; i++) {
2139		count = READ_ONCE(si->swap_map[i]);
2140		if (count && swap_count(count) != SWAP_MAP_BAD)
2141			if (!frontswap || frontswap_test(si, i))
2142				break;
2143		if ((i % LATENCY_LIMIT) == 0)
2144			cond_resched();
2145	}
2146
2147	if (i == si->max)
2148		i = 0;
2149
2150	return i;
2151}
2152
2153/*
2154 * If the boolean frontswap is true, only unuse pages_to_unuse pages;
2155 * pages_to_unuse==0 means all pages; ignored if frontswap is false
2156 */
2157int try_to_unuse(unsigned int type, bool frontswap,
2158		 unsigned long pages_to_unuse)
2159{
2160	struct mm_struct *prev_mm;
2161	struct mm_struct *mm;
2162	struct list_head *p;
2163	int retval = 0;
2164	struct swap_info_struct *si = swap_info[type];
2165	struct page *page;
2166	swp_entry_t entry;
2167	unsigned int i;
2168
2169	if (!READ_ONCE(si->inuse_pages))
2170		return 0;
2171
2172	if (!frontswap)
2173		pages_to_unuse = 0;
2174
2175retry:
2176	retval = shmem_unuse(type, frontswap, &pages_to_unuse);
2177	if (retval)
2178		goto out;
2179
2180	prev_mm = &init_mm;
2181	mmget(prev_mm);
2182
2183	spin_lock(&mmlist_lock);
2184	p = &init_mm.mmlist;
2185	while (READ_ONCE(si->inuse_pages) &&
2186	       !signal_pending(current) &&
2187	       (p = p->next) != &init_mm.mmlist) {
2188
2189		mm = list_entry(p, struct mm_struct, mmlist);
2190		if (!mmget_not_zero(mm))
2191			continue;
2192		spin_unlock(&mmlist_lock);
2193		mmput(prev_mm);
2194		prev_mm = mm;
2195		retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
2196
2197		if (retval) {
2198			mmput(prev_mm);
2199			goto out;
2200		}
2201
2202		/*
2203		 * Make sure that we aren't completely killing
2204		 * interactive performance.
2205		 */
2206		cond_resched();
2207		spin_lock(&mmlist_lock);
2208	}
2209	spin_unlock(&mmlist_lock);
2210
2211	mmput(prev_mm);
2212
2213	i = 0;
2214	while (READ_ONCE(si->inuse_pages) &&
2215	       !signal_pending(current) &&
2216	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2217
2218		entry = swp_entry(type, i);
2219		page = find_get_page(swap_address_space(entry), i);
2220		if (!page)
2221			continue;
2222
2223		/*
2224		 * It is conceivable that a racing task removed this page from
2225		 * swap cache just before we acquired the page lock. The page
2226		 * might even be back in swap cache on another swap area. But
2227		 * that is okay, try_to_free_swap() only removes stale pages.
2228		 */
2229		lock_page(page);
2230		wait_on_page_writeback(page);
2231		try_to_free_swap(page);
2232		unlock_page(page);
2233		put_page(page);
2234
2235		/*
2236		 * For frontswap, we just need to unuse pages_to_unuse, if
2237		 * it was specified. Need not check frontswap again here as
2238		 * we already zeroed out pages_to_unuse if not frontswap.
2239		 */
2240		if (pages_to_unuse && --pages_to_unuse == 0)
2241			goto out;
2242	}
2243
2244	/*
2245	 * Let's check again to see if there are still swap entries in the map.
2246	 * If yes, we need to retry the unuse logic again.
2247	 * Under global memory pressure, swap entries can be reinserted back
2248	 * into process space after the mmlist loop above passes over them.
2249	 *
2250	 * Limit the number of retries? No: when mmget_not_zero() above fails,
2251	 * that mm is likely to be freeing swap from exit_mmap(), which proceeds
2252	 * at its own independent pace; and even shmem_writepage() could have
2253	 * been preempted after get_swap_page(), temporarily hiding that swap.
2254	 * It's easy and robust (though cpu-intensive) just to keep retrying.
2255	 */
2256	if (READ_ONCE(si->inuse_pages)) {
2257		if (!signal_pending(current))
2258			goto retry;
2259		retval = -EINTR;
2260	}
2261out:
2262	return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
2263}
2264
2265/*
2266 * After a successful try_to_unuse, if no swap is now in use, we know
2267 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2268 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2269 * added to the mmlist just after page_duplicate - before would be racy.
2270 */
2271static void drain_mmlist(void)
2272{
2273	struct list_head *p, *next;
2274	unsigned int type;
2275
2276	for (type = 0; type < nr_swapfiles; type++)
2277		if (swap_info[type]->inuse_pages)
2278			return;
2279	spin_lock(&mmlist_lock);
2280	list_for_each_safe(p, next, &init_mm.mmlist)
2281		list_del_init(p);
2282	spin_unlock(&mmlist_lock);
2283}
2284
2285/*
2286 * Free all of a swapdev's extent information
2287 */
2288static void destroy_swap_extents(struct swap_info_struct *sis)
2289{
2290	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2291		struct rb_node *rb = sis->swap_extent_root.rb_node;
2292		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2293
2294		rb_erase(rb, &sis->swap_extent_root);
2295		kfree(se);
2296	}
2297
2298	if (sis->flags & SWP_ACTIVATED) {
2299		struct file *swap_file = sis->swap_file;
2300		struct address_space *mapping = swap_file->f_mapping;
2301
2302		sis->flags &= ~SWP_ACTIVATED;
2303		if (mapping->a_ops->swap_deactivate)
2304			mapping->a_ops->swap_deactivate(swap_file);
2305	}
2306}
2307
2308/*
2309 * Add a block range (and the corresponding page range) into this swapdev's
2310 * extent tree.
2311 *
2312 * This function rather assumes that it is called in ascending page order.
2313 */
2314int
2315add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2316		unsigned long nr_pages, sector_t start_block)
2317{
2318	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2319	struct swap_extent *se;
2320	struct swap_extent *new_se;
2321
2322	/*
2323	 * place the new node at the rightmost position since the
2324	 * function is called in ascending page order.
2325	 */
2326	while (*link) {
2327		parent = *link;
2328		link = &parent->rb_right;
2329	}
2330
2331	if (parent) {
2332		se = rb_entry(parent, struct swap_extent, rb_node);
2333		BUG_ON(se->start_page + se->nr_pages != start_page);
2334		if (se->start_block + se->nr_pages == start_block) {
2335			/* Merge it */
2336			se->nr_pages += nr_pages;
2337			return 0;
2338		}
2339	}
2340
2341	/* No merge, insert a new extent. */
2342	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2343	if (new_se == NULL)
2344		return -ENOMEM;
2345	new_se->start_page = start_page;
2346	new_se->nr_pages = nr_pages;
2347	new_se->start_block = start_block;
2348
2349	rb_link_node(&new_se->rb_node, parent, link);
2350	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2351	return 1;
2352}
2353EXPORT_SYMBOL_GPL(add_swap_extent);
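
A standalone sketch of the extent logic, assuming a flat array in place of the kernel's rb-tree (the names and layout below are illustrative only): adjacent runs are merged the same way add_swap_extent() merges them, and a page offset is translated to a disk block with the same start_block + (offset - start_page) arithmetic that swapdev_block() uses.

/* Illustrative userspace sketch only; the kernel keeps extents in an
 * rb-tree keyed by start_page, not in a fixed array. */
#include <stdio.h>

struct toy_extent { unsigned long start_page, nr_pages, start_block; };

static struct toy_extent extents[16];
static int nr_extents;

/* Mirror of add_swap_extent()'s merge rule for ascending page order. */
static void toy_add_extent(unsigned long start_page, unsigned long nr_pages,
			   unsigned long start_block)
{
	if (nr_extents) {
		struct toy_extent *last = &extents[nr_extents - 1];

		if (last->start_block + last->nr_pages == start_block) {
			last->nr_pages += nr_pages;	/* blocks contiguous: merge */
			return;
		}
	}
	extents[nr_extents++] = (struct toy_extent){ start_page, nr_pages, start_block };
}

/* Mirror of swapdev_block(): swap page offset -> disk block. */
static unsigned long toy_block(unsigned long offset)
{
	for (int i = 0; i < nr_extents; i++) {
		struct toy_extent *se = &extents[i];

		if (offset >= se->start_page && offset < se->start_page + se->nr_pages)
			return se->start_block + (offset - se->start_page);
	}
	return 0;
}

int main(void)
{
	toy_add_extent(0, 256, 1000);	/* pages 0..255   -> blocks 1000..    */
	toy_add_extent(256, 256, 1256);	/* contiguous: merged into one extent */
	toy_add_extent(512, 128, 9000);	/* discontiguous: a new extent        */
	printf("extents=%d block(300)=%lu block(600)=%lu\n",
	       nr_extents, toy_block(300), toy_block(600));
	return 0;
}
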
2354
2355/*
2356 * A `swap extent' is a simple thing which maps a contiguous range of pages
2357 * onto a contiguous range of disk blocks.  An ordered list of swap extents
2358 * is built at swapon time and is then used at swap_writepage/swap_readpage
2359 * time for locating where on disk a page belongs.
2360 *
2361 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2362 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2363 * swap files identically.
2364 *
2365 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2366 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2367 * swapfiles are handled *identically* after swapon time.
2368 *
2369 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2370 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
2371 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
2372 * requirements, they are simply tossed out - we will never use those blocks
2373 * for swapping.
2374 *
2375 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2376 * prevents users from writing to the swap device, which will corrupt memory.
2377 *
2378 * The amount of disk space which a single swap extent represents varies.
2379 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2380 * extents in the list.  To avoid much list walking, we cache the previous
2381 * search location in `curr_swap_extent', and start new searches from there.
2382 * This is extremely effective.  The average number of iterations in
2383 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
2384 */
2385static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2386{
2387	struct file *swap_file = sis->swap_file;
2388	struct address_space *mapping = swap_file->f_mapping;
2389	struct inode *inode = mapping->host;
2390	int ret;
2391
2392	if (S_ISBLK(inode->i_mode)) {
2393		ret = add_swap_extent(sis, 0, sis->max, 0);
2394		*span = sis->pages;
2395		return ret;
2396	}
2397
2398	if (mapping->a_ops->swap_activate) {
2399		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2400		if (ret >= 0)
2401			sis->flags |= SWP_ACTIVATED;
2402		if (!ret) {
2403			sis->flags |= SWP_FS_OPS;
2404			ret = add_swap_extent(sis, 0, sis->max, 0);
2405			*span = sis->pages;
2406		}
2407		return ret;
2408	}
2409
2410	return generic_swapfile_activate(sis, swap_file, span);
2411}
2412
2413static int swap_node(struct swap_info_struct *p)
2414{
2415	struct block_device *bdev;
2416
2417	if (p->bdev)
2418		bdev = p->bdev;
2419	else
2420		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2421
2422	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2423}
2424
2425static void setup_swap_info(struct swap_info_struct *p, int prio,
2426			    unsigned char *swap_map,
2427			    struct swap_cluster_info *cluster_info)
2428{
2429	int i;
2430
2431	if (prio >= 0)
2432		p->prio = prio;
2433	else
2434		p->prio = --least_priority;
2435	/*
2436	 * the plist prio is negated because plist ordering is
2437	 * low-to-high, while swap ordering is high-to-low
2438	 */
2439	p->list.prio = -p->prio;
2440	for_each_node(i) {
2441		if (p->prio >= 0)
2442			p->avail_lists[i].prio = -p->prio;
2443		else {
2444			if (swap_node(p) == i)
2445				p->avail_lists[i].prio = 1;
2446			else
2447				p->avail_lists[i].prio = -p->prio;
2448		}
2449	}
2450	p->swap_map = swap_map;
2451	p->cluster_info = cluster_info;
2452}
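
The negated priorities above are easy to misread; a small sketch, using a sorted array as a stand-in for the kernel's plist, shows how storing -prio in a low-to-high ordered structure yields the intended high-to-low swap priority order (the device names and values below are made up):

/* Sketch only: a sorted-ascending array stands in for the kernel's plist. */
#include <stdio.h>
#include <stdlib.h>

struct toy_si { const char *name; int prio; int plist_prio; };

static int cmp(const void *a, const void *b)
{
	return ((const struct toy_si *)a)->plist_prio -
	       ((const struct toy_si *)b)->plist_prio;
}

int main(void)
{
	/* one user-set priority (5) and two auto-assigned ones (-1, -2) */
	struct toy_si si[] = {
		{ "swap_a (prio 5)",   5, -5 },	/* p->list.prio = -p->prio */
		{ "swap_b (prio -1)", -1,  1 },
		{ "swap_c (prio -2)", -2,  2 },
	};

	/* plists order low-to-high, so the most negative plist_prio,
	 * i.e. the highest swap priority, comes out first */
	qsort(si, 3, sizeof(si[0]), cmp);
	for (int i = 0; i < 3; i++)
		printf("%d: %s\n", i, si[i].name);
	return 0;
}
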
2453
2454static void _enable_swap_info(struct swap_info_struct *p)
2455{
2456	p->flags |= SWP_WRITEOK;
2457	atomic_long_add(p->pages, &nr_swap_pages);
2458	total_swap_pages += p->pages;
2459
2460	assert_spin_locked(&swap_lock);
2461	/*
2462	 * both lists are plists, and thus priority ordered.
2463	 * swap_active_head needs to be priority ordered for swapoff(),
2464	 * which on removal of any swap_info_struct with an auto-assigned
2465	 * (i.e. negative) priority increments the auto-assigned priority
2466	 * of any lower-priority swap_info_structs.
2467	 * swap_avail_head needs to be priority ordered for get_swap_page(),
2468	 * which allocates swap pages from the highest available priority
2469	 * swap_info_struct.
2470	 */
2471	plist_add(&p->list, &swap_active_head);
2472	add_to_avail_list(p);
2473}
2474
2475static void enable_swap_info(struct swap_info_struct *p, int prio,
2476				unsigned char *swap_map,
2477				struct swap_cluster_info *cluster_info,
2478				unsigned long *frontswap_map)
2479{
2480	frontswap_init(p->type, frontswap_map);
2481	spin_lock(&swap_lock);
2482	spin_lock(&p->lock);
2483	setup_swap_info(p, prio, swap_map, cluster_info);
2484	spin_unlock(&p->lock);
2485	spin_unlock(&swap_lock);
2486	/*
2487	 * Finished initializing swap device, now it's safe to reference it.
2488	 */
2489	percpu_ref_resurrect(&p->users);
2490	spin_lock(&swap_lock);
2491	spin_lock(&p->lock);
2492	_enable_swap_info(p);
2493	spin_unlock(&p->lock);
2494	spin_unlock(&swap_lock);
2495}
2496
2497static void reinsert_swap_info(struct swap_info_struct *p)
2498{
2499	spin_lock(&swap_lock);
2500	spin_lock(&p->lock);
2501	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2502	_enable_swap_info(p);
2503	spin_unlock(&p->lock);
2504	spin_unlock(&swap_lock);
2505}
2506
2507bool has_usable_swap(void)
2508{
2509	bool ret = true;
2510
2511	spin_lock(&swap_lock);
2512	if (plist_head_empty(&swap_active_head))
2513		ret = false;
2514	spin_unlock(&swap_lock);
2515	return ret;
2516}
2517
2518SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2519{
2520	struct swap_info_struct *p = NULL;
2521	unsigned char *swap_map;
2522	struct swap_cluster_info *cluster_info;
2523	unsigned long *frontswap_map;
2524	struct file *swap_file, *victim;
2525	struct address_space *mapping;
2526	struct inode *inode;
2527	struct filename *pathname;
2528	int err, found = 0;
2529	unsigned int old_block_size;
2530
2531	if (!capable(CAP_SYS_ADMIN))
2532		return -EPERM;
2533
2534	BUG_ON(!current->mm);
2535
2536	pathname = getname(specialfile);
2537	if (IS_ERR(pathname))
2538		return PTR_ERR(pathname);
2539
2540	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2541	err = PTR_ERR(victim);
2542	if (IS_ERR(victim))
2543		goto out;
2544
2545	mapping = victim->f_mapping;
2546	spin_lock(&swap_lock);
2547	plist_for_each_entry(p, &swap_active_head, list) {
2548		if (p->flags & SWP_WRITEOK) {
2549			if (p->swap_file->f_mapping == mapping) {
2550				found = 1;
2551				break;
2552			}
2553		}
2554	}
2555	if (!found) {
2556		err = -EINVAL;
2557		spin_unlock(&swap_lock);
2558		goto out_dput;
2559	}
2560	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2561		vm_unacct_memory(p->pages);
2562	else {
2563		err = -ENOMEM;
2564		spin_unlock(&swap_lock);
2565		goto out_dput;
2566	}
2567	del_from_avail_list(p);
2568	spin_lock(&p->lock);
2569	if (p->prio < 0) {
2570		struct swap_info_struct *si = p;
2571		int nid;
2572
2573		plist_for_each_entry_continue(si, &swap_active_head, list) {
2574			si->prio++;
2575			si->list.prio--;
2576			for_each_node(nid) {
2577				if (si->avail_lists[nid].prio != 1)
2578					si->avail_lists[nid].prio--;
2579			}
2580		}
2581		least_priority++;
2582	}
2583	plist_del(&p->list, &swap_active_head);
2584	atomic_long_sub(p->pages, &nr_swap_pages);
2585	total_swap_pages -= p->pages;
2586	p->flags &= ~SWP_WRITEOK;
2587	spin_unlock(&p->lock);
2588	spin_unlock(&swap_lock);
2589
2590	disable_swap_slots_cache_lock();
2591
2592	set_current_oom_origin();
2593	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
2594	clear_current_oom_origin();
2595
2596	if (err) {
2597		/* re-insert swap space back into swap_list */
2598		reinsert_swap_info(p);
2599		reenable_swap_slots_cache_unlock();
2600		goto out_dput;
2601	}
2602
2603	reenable_swap_slots_cache_unlock();
2604
2605	/*
2606	 * Wait for swap operations protected by get/put_swap_device()
2607	 * to complete.
2608	 *
2609	 * We need synchronize_rcu() here to protect the access to
2610	 * the swap cache data structure.
2611	 */
2612	percpu_ref_kill(&p->users);
2613	synchronize_rcu();
2614	wait_for_completion(&p->comp);
2615
2616	flush_work(&p->discard_work);
2617
2618	destroy_swap_extents(p);
2619	if (p->flags & SWP_CONTINUED)
2620		free_swap_count_continuations(p);
2621
2622	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
2623		atomic_dec(&nr_rotate_swap);
2624
2625	mutex_lock(&swapon_mutex);
2626	spin_lock(&swap_lock);
2627	spin_lock(&p->lock);
2628	drain_mmlist();
2629
2630	/* wait for anyone still in scan_swap_map_slots */
2631	p->highest_bit = 0;		/* cuts scans short */
2632	while (p->flags >= SWP_SCANNING) {
2633		spin_unlock(&p->lock);
2634		spin_unlock(&swap_lock);
2635		schedule_timeout_uninterruptible(1);
2636		spin_lock(&swap_lock);
2637		spin_lock(&p->lock);
2638	}
2639
2640	swap_file = p->swap_file;
2641	old_block_size = p->old_block_size;
2642	p->swap_file = NULL;
2643	p->max = 0;
2644	swap_map = p->swap_map;
2645	p->swap_map = NULL;
2646	cluster_info = p->cluster_info;
2647	p->cluster_info = NULL;
2648	frontswap_map = frontswap_map_get(p);
2649	spin_unlock(&p->lock);
2650	spin_unlock(&swap_lock);
2651	arch_swap_invalidate_area(p->type);
2652	frontswap_invalidate_area(p->type);
2653	frontswap_map_set(p, NULL);
2654	mutex_unlock(&swapon_mutex);
2655	free_percpu(p->percpu_cluster);
2656	p->percpu_cluster = NULL;
2657	free_percpu(p->cluster_next_cpu);
2658	p->cluster_next_cpu = NULL;
2659	vfree(swap_map);
2660	kvfree(cluster_info);
2661	kvfree(frontswap_map);
2662	/* Destroy swap account information */
2663	swap_cgroup_swapoff(p->type);
2664	exit_swap_address_space(p->type);
2665
2666	inode = mapping->host;
2667	if (S_ISBLK(inode->i_mode)) {
2668		struct block_device *bdev = I_BDEV(inode);
2669
2670		set_blocksize(bdev, old_block_size);
2671		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2672	}
2673
2674	inode_lock(inode);
2675	inode->i_flags &= ~S_SWAPFILE;
2676	inode_unlock(inode);
2677	filp_close(swap_file, NULL);
2678
2679	/*
2680	 * Clear the SWP_USED flag after all resources are freed so that swapon
2681	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2682	 * not hold p->lock after we cleared its SWP_WRITEOK.
2683	 */
2684	spin_lock(&swap_lock);
2685	p->flags = 0;
2686	spin_unlock(&swap_lock);
2687
2688	err = 0;
2689	atomic_inc(&proc_poll_event);
2690	wake_up_interruptible(&proc_poll_wait);
2691
2692out_dput:
2693	filp_close(victim, NULL);
2694out:
2695	putname(pathname);
2696	return err;
2697}
2698
2699#ifdef CONFIG_PROC_FS
2700static __poll_t swaps_poll(struct file *file, poll_table *wait)
2701{
2702	struct seq_file *seq = file->private_data;
2703
2704	poll_wait(file, &proc_poll_wait, wait);
2705
2706	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2707		seq->poll_event = atomic_read(&proc_poll_event);
2708		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2709	}
2710
2711	return EPOLLIN | EPOLLRDNORM;
2712}
2713
2714/* iterator */
2715static void *swap_start(struct seq_file *swap, loff_t *pos)
2716{
2717	struct swap_info_struct *si;
2718	int type;
2719	loff_t l = *pos;
2720
2721	mutex_lock(&swapon_mutex);
2722
2723	if (!l)
2724		return SEQ_START_TOKEN;
2725
2726	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2727		if (!(si->flags & SWP_USED) || !si->swap_map)
2728			continue;
2729		if (!--l)
2730			return si;
2731	}
2732
2733	return NULL;
2734}
2735
2736static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2737{
2738	struct swap_info_struct *si = v;
2739	int type;
2740
2741	if (v == SEQ_START_TOKEN)
2742		type = 0;
2743	else
2744		type = si->type + 1;
2745
2746	++(*pos);
2747	for (; (si = swap_type_to_swap_info(type)); type++) {
2748		if (!(si->flags & SWP_USED) || !si->swap_map)
2749			continue;
2750		return si;
2751	}
2752
2753	return NULL;
2754}
2755
2756static void swap_stop(struct seq_file *swap, void *v)
2757{
2758	mutex_unlock(&swapon_mutex);
2759}
2760
2761static int swap_show(struct seq_file *swap, void *v)
2762{
2763	struct swap_info_struct *si = v;
2764	struct file *file;
2765	int len;
2766	unsigned int bytes, inuse;
2767
2768	if (si == SEQ_START_TOKEN) {
2769		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2770		return 0;
2771	}
2772
2773	bytes = si->pages << (PAGE_SHIFT - 10);
2774	inuse = si->inuse_pages << (PAGE_SHIFT - 10);
2775
2776	file = si->swap_file;
2777	len = seq_file_path(swap, file, " \t\n\\");
2778	seq_printf(swap, "%*s%s\t%u\t%s%u\t%s%d\n",
2779			len < 40 ? 40 - len : 1, " ",
2780			S_ISBLK(file_inode(file)->i_mode) ?
2781				"partition" : "file\t",
2782			bytes, bytes < 10000000 ? "\t" : "",
2783			inuse, inuse < 10000000 ? "\t" : "",
2784			si->prio);
2785	return 0;
2786}
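
For reference, the header and format string above are what generate /proc/swaps; a hypothetical rendering (device names and numbers are invented, and both size columns are in KiB because of the PAGE_SHIFT - 10 shift) looks roughly like this:

Filename                                Type            Size            Used            Priority
/dev/vda2                               partition       4194300         81920           -2
/swapfile                               file            1048572         0               -3
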
2787
2788static const struct seq_operations swaps_op = {
2789	.start =	swap_start,
2790	.next =		swap_next,
2791	.stop =		swap_stop,
2792	.show =		swap_show
2793};
2794
2795static int swaps_open(struct inode *inode, struct file *file)
2796{
2797	struct seq_file *seq;
2798	int ret;
2799
2800	ret = seq_open(file, &swaps_op);
2801	if (ret)
2802		return ret;
2803
2804	seq = file->private_data;
2805	seq->poll_event = atomic_read(&proc_poll_event);
2806	return 0;
2807}
2808
2809static const struct proc_ops swaps_proc_ops = {
2810	.proc_flags	= PROC_ENTRY_PERMANENT,
2811	.proc_open	= swaps_open,
2812	.proc_read	= seq_read,
2813	.proc_lseek	= seq_lseek,
2814	.proc_release	= seq_release,
2815	.proc_poll	= swaps_poll,
2816};
2817
2818static int __init procswaps_init(void)
2819{
2820	proc_create("swaps", 0, NULL, &swaps_proc_ops);
2821	return 0;
2822}
2823__initcall(procswaps_init);
2824#endif /* CONFIG_PROC_FS */
2825
2826#ifdef MAX_SWAPFILES_CHECK
2827static int __init max_swapfiles_check(void)
2828{
2829	MAX_SWAPFILES_CHECK();
2830	return 0;
2831}
2832late_initcall(max_swapfiles_check);
2833#endif
2834
2835static struct swap_info_struct *alloc_swap_info(void)
2836{
2837	struct swap_info_struct *p;
2838	struct swap_info_struct *defer = NULL;
2839	unsigned int type;
2840	int i;
2841
2842	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2843	if (!p)
2844		return ERR_PTR(-ENOMEM);
2845
2846	if (percpu_ref_init(&p->users, swap_users_ref_free,
2847			    PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2848		kvfree(p);
2849		return ERR_PTR(-ENOMEM);
2850	}
2851
2852	spin_lock(&swap_lock);
2853	for (type = 0; type < nr_swapfiles; type++) {
2854		if (!(swap_info[type]->flags & SWP_USED))
2855			break;
2856	}
2857	if (type >= MAX_SWAPFILES) {
2858		spin_unlock(&swap_lock);
2859		percpu_ref_exit(&p->users);
2860		kvfree(p);
2861		return ERR_PTR(-EPERM);
2862	}
2863	if (type >= nr_swapfiles) {
2864		p->type = type;
2865		/*
2866		 * Publish the swap_info_struct after initializing it.
2867		 * Note that kvzalloc() above zeroes all its fields.
2868		 */
2869		smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2870		nr_swapfiles++;
2871	} else {
2872		defer = p;
2873		p = swap_info[type];
2874		/*
2875		 * Do not memset this entry: a racing procfs swap_next()
2876		 * would be relying on p->type to remain valid.
2877		 */
2878	}
2879	p->swap_extent_root = RB_ROOT;
2880	plist_node_init(&p->list, 0);
2881	for_each_node(i)
2882		plist_node_init(&p->avail_lists[i], 0);
2883	p->flags = SWP_USED;
2884	spin_unlock(&swap_lock);
2885	if (defer) {
2886		percpu_ref_exit(&defer->users);
2887		kvfree(defer);
2888	}
2889	spin_lock_init(&p->lock);
2890	spin_lock_init(&p->cont_lock);
2891	init_completion(&p->comp);
2892
2893	return p;
2894}
2895
2896static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2897{
2898	int error;
2899
2900	if (S_ISBLK(inode->i_mode)) {
2901		p->bdev = blkdev_get_by_dev(inode->i_rdev,
2902				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2903		if (IS_ERR(p->bdev)) {
2904			error = PTR_ERR(p->bdev);
2905			p->bdev = NULL;
2906			return error;
2907		}
2908		p->old_block_size = block_size(p->bdev);
2909		error = set_blocksize(p->bdev, PAGE_SIZE);
2910		if (error < 0)
2911			return error;
2912		/*
2913		 * Zoned block devices contain zones that have a sequential
2914		 * write only restriction.  Hence zoned block devices are not
2915		 * suitable for swapping.  Disallow them here.
2916		 */
2917		if (blk_queue_is_zoned(p->bdev->bd_disk->queue))
2918			return -EINVAL;
2919		p->flags |= SWP_BLKDEV;
2920	} else if (S_ISREG(inode->i_mode)) {
2921		p->bdev = inode->i_sb->s_bdev;
2922	}
2923
2924	return 0;
2925}
2926
2927
2928/*
2929 * Find out how many pages are allowed for a single swap device. There
2930 * are two limiting factors:
2931 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2932 * 2) the number of bits in the swap pte, as defined by the different
2933 * architectures.
2934 *
2935 * In order to find the largest possible bit mask, a swap entry with
2936 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2937 * decoded to a swp_entry_t again, and finally the swap offset is
2938 * extracted.
2939 *
2940 * This will mask all the bits from the initial ~0UL mask that can't
2941 * be encoded in either the swp_entry_t or the architecture definition
2942 * of a swap pte.
2943 */
2944unsigned long generic_max_swapfile_size(void)
2945{
2946	return swp_offset(pte_to_swp_entry(
2947			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2948}
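
The round-trip trick explained above can be reproduced with a toy encoding; the field widths below are pure assumptions (the real ones are per-architecture), but the mechanism, encode an all-ones offset, decode it, and add one, is the same:

/* Toy illustration of the generic_max_swapfile_size() trick; the bit
 * widths here are made up for the example. */
#include <stdio.h>

#define TOY_TYPE_BITS	3
#define TOY_OFFSET_BITS	24	/* assumption: offset bits the "pte" can hold */

static unsigned long toy_entry_to_pte(unsigned long type, unsigned long offset)
{
	/* only TOY_OFFSET_BITS of the offset survive the encoding */
	return (type << TOY_OFFSET_BITS) | (offset & ((1UL << TOY_OFFSET_BITS) - 1));
}

static unsigned long toy_pte_to_offset(unsigned long pte)
{
	return pte & ((1UL << TOY_OFFSET_BITS) - 1);
}

int main(void)
{
	/* encode the all-ones offset, decode it, and the survivor + 1 is
	 * the maximum number of pages one swap device may hold */
	unsigned long max = toy_pte_to_offset(toy_entry_to_pte(0, ~0UL)) + 1;

	printf("max offsets representable: %lu (= 2^%d)\n", max, TOY_OFFSET_BITS);
	return 0;
}
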
2949
2950/* Can be overridden by an architecture for additional checks. */
2951__weak unsigned long max_swapfile_size(void)
2952{
2953	return generic_max_swapfile_size();
2954}
2955
2956static unsigned long read_swap_header(struct swap_info_struct *p,
2957					union swap_header *swap_header,
2958					struct inode *inode)
2959{
2960	int i;
2961	unsigned long maxpages;
2962	unsigned long swapfilepages;
2963	unsigned long last_page;
2964
2965	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2966		pr_err("Unable to find swap-space signature\n");
2967		return 0;
2968	}
2969
2970	/* swap partition endianness hack... */
2971	if (swab32(swap_header->info.version) == 1) {
2972		swab32s(&swap_header->info.version);
2973		swab32s(&swap_header->info.last_page);
2974		swab32s(&swap_header->info.nr_badpages);
2975		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2976			return 0;
2977		for (i = 0; i < swap_header->info.nr_badpages; i++)
2978			swab32s(&swap_header->info.badpages[i]);
2979	}
2980	/* Check the swap header's sub-version */
2981	if (swap_header->info.version != 1) {
2982		pr_warn("Unable to handle swap header version %d\n",
2983			swap_header->info.version);
2984		return 0;
2985	}
2986
2987	p->lowest_bit  = 1;
2988	p->cluster_next = 1;
2989	p->cluster_nr = 0;
2990
2991	maxpages = max_swapfile_size();
2992	last_page = swap_header->info.last_page;
2993	if (!last_page) {
2994		pr_warn("Empty swap-file\n");
2995		return 0;
2996	}
2997	if (last_page > maxpages) {
2998		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2999			maxpages << (PAGE_SHIFT - 10),
3000			last_page << (PAGE_SHIFT - 10));
3001	}
3002	if (maxpages > last_page) {
3003		maxpages = last_page + 1;
3004		/* p->max is an unsigned int: don't overflow it */
3005		if ((unsigned int)maxpages == 0)
3006			maxpages = UINT_MAX;
3007	}
3008	p->highest_bit = maxpages - 1;
3009
3010	if (!maxpages)
3011		return 0;
3012	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3013	if (swapfilepages && maxpages > swapfilepages) {
3014		pr_warn("Swap area shorter than signature indicates\n");
3015		return 0;
3016	}
3017	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3018		return 0;
3019	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3020		return 0;
3021
3022	return maxpages;
3023}
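
The endianness hack above works because a valid header always has version == 1, so if the byte-swapped value equals 1 the header must have been written on an opposite-endian machine. A standalone sketch of that detection, with a hand-written userspace swab32():

/* Sketch of the byte-swap detection used on the swap header version. */
#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
	/* version as it appears when the header was written on a machine
	 * of the opposite endianness from the one reading it */
	uint32_t version = 0x01000000;

	if (swab32(version) == 1) {
		version = swab32(version);	/* swab32s() in the kernel */
		printf("foreign-endian header, fixed version = %u\n", (unsigned)version);
	} else if (version == 1) {
		printf("native-endian header\n");
	} else {
		printf("unsupported version %u\n", (unsigned)version);
	}
	return 0;
}
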
3024
3025#define SWAP_CLUSTER_INFO_COLS						\
3026	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3027#define SWAP_CLUSTER_SPACE_COLS						\
3028	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3029#define SWAP_CLUSTER_COLS						\
3030	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3031
3032static int setup_swap_map_and_extents(struct swap_info_struct *p,
3033					union swap_header *swap_header,
3034					unsigned char *swap_map,
3035					struct swap_cluster_info *cluster_info,
3036					unsigned long maxpages,
3037					sector_t *span)
3038{
3039	unsigned int j, k;
3040	unsigned int nr_good_pages;
3041	int nr_extents;
3042	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3043	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3044	unsigned long i, idx;
3045
3046	nr_good_pages = maxpages - 1;	/* omit header page */
3047
3048	cluster_list_init(&p->free_clusters);
3049	cluster_list_init(&p->discard_clusters);
3050
3051	for (i = 0; i < swap_header->info.nr_badpages; i++) {
3052		unsigned int page_nr = swap_header->info.badpages[i];
3053		if (page_nr == 0 || page_nr > swap_header->info.last_page)
3054			return -EINVAL;
3055		if (page_nr < maxpages) {
3056			swap_map[page_nr] = SWAP_MAP_BAD;
3057			nr_good_pages--;
3058			/*
3059			 * Haven't marked the cluster free yet, no list
3060			 * operation involved
3061			 */
3062			inc_cluster_info_page(p, cluster_info, page_nr);
3063		}
3064	}
3065
3066	/* Haven't marked the cluster free yet, no list operation involved */
3067	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3068		inc_cluster_info_page(p, cluster_info, i);
3069
3070	if (nr_good_pages) {
3071		swap_map[0] = SWAP_MAP_BAD;
3072		/*
3073		 * Haven't marked the cluster free yet, no list
3074		 * operation involved
3075		 */
3076		inc_cluster_info_page(p, cluster_info, 0);
3077		p->max = maxpages;
3078		p->pages = nr_good_pages;
3079		nr_extents = setup_swap_extents(p, span);
3080		if (nr_extents < 0)
3081			return nr_extents;
3082		nr_good_pages = p->pages;
3083	}
3084	if (!nr_good_pages) {
3085		pr_warn("Empty swap-file\n");
3086		return -EINVAL;
3087	}
3088
3089	if (!cluster_info)
3090		return nr_extents;
3091
3092
3093	/*
3094	 * Reduce false cache line sharing between cluster_info entries
3095	 * that share the same swap address space.
3096	 */
3097	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3098		j = (k + col) % SWAP_CLUSTER_COLS;
3099		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3100			idx = i * SWAP_CLUSTER_COLS + j;
3101			if (idx >= nr_clusters)
3102				continue;
3103			if (cluster_count(&cluster_info[idx]))
3104				continue;
3105			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
3106			cluster_list_add_tail(&p->free_clusters, cluster_info,
3107					      idx);
3108		}
3109	}
3110	return nr_extents;
3111}
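
The final nested loop above adds clusters to the free list in a column-interleaved order rather than 0..nr_clusters-1; a tiny sketch with made-up counts prints that visiting order so the idx = i * SWAP_CLUSTER_COLS + j pattern is easier to follow:

/* Sketch: clusters join the free list column by column, starting at the
 * column derived from cluster_next, instead of linearly.  COLS and
 * NR_CLUSTERS below are stand-ins for SWAP_CLUSTER_COLS / nr_clusters. */
#include <stdio.h>

#define COLS		4
#define NR_CLUSTERS	10

int main(void)
{
	unsigned long col = 2;	/* stand-in for cluster_next / SWAPFILE_CLUSTER % COLS */

	for (unsigned int k = 0; k < COLS; k++) {
		unsigned int j = (k + col) % COLS;

		for (unsigned long i = 0; i < (NR_CLUSTERS + COLS - 1) / COLS; i++) {
			unsigned long idx = i * COLS + j;

			if (idx >= NR_CLUSTERS)
				continue;
			printf("%lu ", idx);	/* order clusters are freed in */
		}
	}
	printf("\n");	/* prints: 2 6 3 7 0 4 8 1 5 9 */
	return 0;
}
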
3112
3113/*
3114 * Helper to sys_swapon determining if a given swap
3115 * backing device queue supports DISCARD operations.
3116 */
3117static bool swap_discardable(struct swap_info_struct *si)
3118{
3119	struct request_queue *q = bdev_get_queue(si->bdev);
3120
3121	if (!q || !blk_queue_discard(q))
3122		return false;
3123
3124	return true;
3125}
3126
3127SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3128{
3129	struct swap_info_struct *p;
3130	struct filename *name;
3131	struct file *swap_file = NULL;
3132	struct address_space *mapping;
3133	int prio;
3134	int error;
3135	union swap_header *swap_header;
3136	int nr_extents;
3137	sector_t span;
3138	unsigned long maxpages;
3139	unsigned char *swap_map = NULL;
3140	struct swap_cluster_info *cluster_info = NULL;
3141	unsigned long *frontswap_map = NULL;
3142	struct page *page = NULL;
3143	struct inode *inode = NULL;
3144	bool inced_nr_rotate_swap = false;
3145
3146	if (swap_flags & ~SWAP_FLAGS_VALID)
3147		return -EINVAL;
3148
3149	if (!capable(CAP_SYS_ADMIN))
3150		return -EPERM;
3151
3152	if (!swap_avail_heads)
3153		return -ENOMEM;
3154
3155	p = alloc_swap_info();
3156	if (IS_ERR(p))
3157		return PTR_ERR(p);
3158
3159	INIT_WORK(&p->discard_work, swap_discard_work);
3160
3161	name = getname(specialfile);
3162	if (IS_ERR(name)) {
3163		error = PTR_ERR(name);
3164		name = NULL;
3165		goto bad_swap;
3166	}
3167	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3168	if (IS_ERR(swap_file)) {
3169		error = PTR_ERR(swap_file);
3170		swap_file = NULL;
3171		goto bad_swap;
3172	}
3173
3174	p->swap_file = swap_file;
3175	mapping = swap_file->f_mapping;
3176	inode = mapping->host;
3177
3178	error = claim_swapfile(p, inode);
3179	if (unlikely(error))
3180		goto bad_swap;
3181
3182	inode_lock(inode);
3183	if (IS_SWAPFILE(inode)) {
3184		error = -EBUSY;
3185		goto bad_swap_unlock_inode;
3186	}
3187
3188	/*
3189	 * Read the swap header.
3190	 */
3191	if (!mapping->a_ops->readpage) {
3192		error = -EINVAL;
3193		goto bad_swap_unlock_inode;
3194	}
3195	page = read_mapping_page(mapping, 0, swap_file);
3196	if (IS_ERR(page)) {
3197		error = PTR_ERR(page);
3198		goto bad_swap_unlock_inode;
3199	}
3200	swap_header = kmap(page);
3201
3202	maxpages = read_swap_header(p, swap_header, inode);
3203	if (unlikely(!maxpages)) {
3204		error = -EINVAL;
3205		goto bad_swap_unlock_inode;
3206	}
3207
3208	/* OK, set up the swap map and apply the bad block list */
3209	swap_map = vzalloc(maxpages);
3210	if (!swap_map) {
3211		error = -ENOMEM;
3212		goto bad_swap_unlock_inode;
3213	}
3214
3215	if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue))
3216		p->flags |= SWP_STABLE_WRITES;
3217
3218	if (p->bdev && p->bdev->bd_disk->fops->rw_page)
3219		p->flags |= SWP_SYNCHRONOUS_IO;
3220
3221	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
3222		int cpu;
3223		unsigned long ci, nr_cluster;
3224
3225		p->flags |= SWP_SOLIDSTATE;
3226		p->cluster_next_cpu = alloc_percpu(unsigned int);
3227		if (!p->cluster_next_cpu) {
3228			error = -ENOMEM;
3229			goto bad_swap_unlock_inode;
3230		}
3231		/*
3232		 * select a random position to start with to help SSD
3233		 * wear leveling
3234		 */
3235		for_each_possible_cpu(cpu) {
3236			per_cpu(*p->cluster_next_cpu, cpu) =
3237				1 + prandom_u32_max(p->highest_bit);
3238		}
3239		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3240
3241		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3242					GFP_KERNEL);
3243		if (!cluster_info) {
3244			error = -ENOMEM;
3245			goto bad_swap_unlock_inode;
3246		}
3247
3248		for (ci = 0; ci < nr_cluster; ci++)
3249			spin_lock_init(&((cluster_info + ci)->lock));
3250
3251		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3252		if (!p->percpu_cluster) {
3253			error = -ENOMEM;
3254			goto bad_swap_unlock_inode;
3255		}
3256		for_each_possible_cpu(cpu) {
3257			struct percpu_cluster *cluster;
3258			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3259			cluster_set_null(&cluster->index);
3260		}
3261	} else {
3262		atomic_inc(&nr_rotate_swap);
3263		inced_nr_rotate_swap = true;
3264	}
3265
3266	error = swap_cgroup_swapon(p->type, maxpages);
3267	if (error)
3268		goto bad_swap_unlock_inode;
3269
3270	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3271		cluster_info, maxpages, &span);
3272	if (unlikely(nr_extents < 0)) {
3273		error = nr_extents;
3274		goto bad_swap_unlock_inode;
3275	}
3276	/* frontswap enabled? set up bit-per-page map for frontswap */
3277	if (IS_ENABLED(CONFIG_FRONTSWAP))
3278		frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3279					 sizeof(long),
3280					 GFP_KERNEL);
3281
3282	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
3283		/*
3284		 * When discard is enabled for swap with no particular
3285		 * policy flagged, we set all swap discard flags here in
3286		 * order to sustain backward compatibility with older
3287		 * swapon(8) releases.
3288		 */
3289		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3290			     SWP_PAGE_DISCARD);
3291
3292		/*
3293		 * By flagging sys_swapon, a sysadmin can tell us to
3294		 * either do single-time area discards only, or to just
3295		 * perform discards for released swap page-clusters.
3296		 * Now it's time to adjust the p->flags accordingly.
3297		 */
3298		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3299			p->flags &= ~SWP_PAGE_DISCARD;
3300		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3301			p->flags &= ~SWP_AREA_DISCARD;
3302
3303		/* issue a swapon-time discard if it's still required */
3304		if (p->flags & SWP_AREA_DISCARD) {
3305			int err = discard_swap(p);
3306			if (unlikely(err))
3307				pr_err("swapon: discard_swap(%p): %d\n",
3308					p, err);
3309		}
3310	}
3311
3312	error = init_swap_address_space(p->type, maxpages);
3313	if (error)
3314		goto bad_swap_unlock_inode;
3315
3316	/*
3317	 * Flush any pending IO and dirty mappings before we start using this
3318	 * swap device.
3319	 */
3320	inode->i_flags |= S_SWAPFILE;
3321	error = inode_drain_writes(inode);
3322	if (error) {
3323		inode->i_flags &= ~S_SWAPFILE;
3324		goto free_swap_address_space;
3325	}
3326
3327	mutex_lock(&swapon_mutex);
3328	prio = -1;
3329	if (swap_flags & SWAP_FLAG_PREFER)
3330		prio =
3331		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3332	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3333
3334	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3335		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3336		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3337		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3338		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3339		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3340		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3341		(frontswap_map) ? "FS" : "");
3342
3343	mutex_unlock(&swapon_mutex);
3344	atomic_inc(&proc_poll_event);
3345	wake_up_interruptible(&proc_poll_wait);
3346
3347	error = 0;
3348	goto out;
3349free_swap_address_space:
3350	exit_swap_address_space(p->type);
3351bad_swap_unlock_inode:
3352	inode_unlock(inode);
3353bad_swap:
3354	free_percpu(p->percpu_cluster);
3355	p->percpu_cluster = NULL;
3356	free_percpu(p->cluster_next_cpu);
3357	p->cluster_next_cpu = NULL;
3358	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3359		set_blocksize(p->bdev, p->old_block_size);
3360		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3361	}
3362	inode = NULL;
3363	destroy_swap_extents(p);
3364	swap_cgroup_swapoff(p->type);
3365	spin_lock(&swap_lock);
3366	p->swap_file = NULL;
3367	p->flags = 0;
3368	spin_unlock(&swap_lock);
3369	vfree(swap_map);
3370	kvfree(cluster_info);
3371	kvfree(frontswap_map);
3372	if (inced_nr_rotate_swap)
3373		atomic_dec(&nr_rotate_swap);
3374	if (swap_file)
3375		filp_close(swap_file, NULL);
3376out:
3377	if (page && !IS_ERR(page)) {
3378		kunmap(page);
3379		put_page(page);
3380	}
3381	if (name)
3382		putname(name);
3383	if (inode)
3384		inode_unlock(inode);
3385	if (!error)
3386		enable_swap_slots_cache();
3387	return error;
3388}
3389
3390void si_swapinfo(struct sysinfo *val)
3391{
3392	unsigned int type;
3393	unsigned long nr_to_be_unused = 0;
3394
3395	spin_lock(&swap_lock);
3396	for (type = 0; type < nr_swapfiles; type++) {
3397		struct swap_info_struct *si = swap_info[type];
3398
3399		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3400			nr_to_be_unused += si->inuse_pages;
3401	}
3402	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3403	val->totalswap = total_swap_pages + nr_to_be_unused;
3404	spin_unlock(&swap_lock);
3405}
3406
3407/*
3408 * Verify that a swap entry is valid and increment its swap map count.
3409 *
3410 * Returns an error code in the following cases:
3411 * - success -> 0
3412 * - swp_entry is invalid -> EINVAL
3413 * - swp_entry is migration entry -> EINVAL
3414 * - swap-cache reference is requested but there is already one. -> EEXIST
3415 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3416 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3417 */
3418static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3419{
3420	struct swap_info_struct *p;
3421	struct swap_cluster_info *ci;
3422	unsigned long offset;
3423	unsigned char count;
3424	unsigned char has_cache;
3425	int err;
3426
3427	p = get_swap_device(entry);
3428	if (!p)
3429		return -EINVAL;
3430
3431	offset = swp_offset(entry);
3432	ci = lock_cluster_or_swap_info(p, offset);
3433
3434	count = p->swap_map[offset];
3435
3436	/*
3437	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3438	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3439	 */
3440	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3441		err = -ENOENT;
3442		goto unlock_out;
3443	}
3444
3445	has_cache = count & SWAP_HAS_CACHE;
3446	count &= ~SWAP_HAS_CACHE;
3447	err = 0;
3448
3449	if (usage == SWAP_HAS_CACHE) {
3450
3451		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3452		if (!has_cache && count)
3453			has_cache = SWAP_HAS_CACHE;
3454		else if (has_cache)		/* someone else added cache */
3455			err = -EEXIST;
3456		else				/* no users remaining */
3457			err = -ENOENT;
3458
3459	} else if (count || has_cache) {
3460
3461		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3462			count += usage;
3463		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3464			err = -EINVAL;
3465		else if (swap_count_continued(p, offset, count))
3466			count = COUNT_CONTINUED;
3467		else
3468			err = -ENOMEM;
3469	} else
3470		err = -ENOENT;			/* unused swap entry */
3471
3472	WRITE_ONCE(p->swap_map[offset], count | has_cache);
3473
3474unlock_out:
3475	unlock_cluster_or_swap_info(p, ci);
3476	if (p)
3477		put_swap_device(p);
3478	return err;
3479}
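
Each swap_map entry is one byte combining a usage count with flag bits; the constants below are illustrative stand-ins (the authoritative values live in include/linux/swap.h), but the split that __swap_duplicate() manipulates, a count in the low bits plus SWAP_HAS_CACHE and COUNT_CONTINUED flags, can be sketched like this:

/* Sketch of the swap_map byte handling done by __swap_duplicate();
 * the numeric values here are assumptions for illustration only. */
#include <stdio.h>

#define TOY_SWAP_HAS_CACHE	0x40	/* entry also lives in swap cache */
#define TOY_COUNT_CONTINUED	0x80	/* count continues on an extra page */
#define TOY_COUNT_MASK		0x3f	/* low bits: the map count itself */

int main(void)
{
	unsigned char map = 0x02 | TOY_SWAP_HAS_CACHE;	/* two users + cache */

	unsigned char has_cache = map & TOY_SWAP_HAS_CACHE;
	unsigned char count = map & ~TOY_SWAP_HAS_CACHE;

	/* a "swap-mapped" reference: bump the count, keep the cache flag */
	count += 1;
	map = count | has_cache;
	printf("after a swap_duplicate-style bump: count=%d cache=%d\n",
	       map & TOY_COUNT_MASK, !!(map & TOY_SWAP_HAS_CACHE));

	/* a "swap-cache" reference on an entry that already has one */
	if (map & TOY_SWAP_HAS_CACHE)
		printf("a swapcache_prepare-style request would get -EEXIST\n");
	return 0;
}
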
3480
3481/*
3482 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3483 * (in which case its reference count is never incremented).
3484 */
3485void swap_shmem_alloc(swp_entry_t entry)
3486{
3487	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3488}
3489
3490/*
3491 * Increase reference count of swap entry by 1.
3492 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3493 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3494 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3495 * might occur if a page table entry has got corrupted.
3496 */
3497int swap_duplicate(swp_entry_t entry)
3498{
3499	int err = 0;
3500
3501	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3502		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3503	return err;
3504}
3505
3506/*
3507 * @entry: swap entry for which we allocate swap cache.
3508 *
3509 * Called when allocating swap cache for an existing swap entry.
3510 * This can return error codes. Returns 0 on success.
3511 * -EEXIST means there is a swap cache.
3512 * Note: return code is different from swap_duplicate().
3513 */
3514int swapcache_prepare(swp_entry_t entry)
3515{
3516	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3517}
3518
3519struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3520{
3521	return swap_type_to_swap_info(swp_type(entry));
3522}
3523
3524struct swap_info_struct *page_swap_info(struct page *page)
3525{
3526	swp_entry_t entry = { .val = page_private(page) };
3527	return swp_swap_info(entry);
3528}
3529
3530/*
3531 * out-of-line __page_file_ methods to avoid include hell.
3532 */
3533struct address_space *__page_file_mapping(struct page *page)
3534{
3535	return page_swap_info(page)->swap_file->f_mapping;
3536}
3537EXPORT_SYMBOL_GPL(__page_file_mapping);
3538
3539pgoff_t __page_file_index(struct page *page)
3540{
3541	swp_entry_t swap = { .val = page_private(page) };
3542	return swp_offset(swap);
3543}
3544EXPORT_SYMBOL_GPL(__page_file_index);
3545
3546/*
3547 * add_swap_count_continuation - called when a swap count is duplicated
3548 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3549 * page of the original vmalloc'ed swap_map, to hold the continuation count
3550 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3551 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3552 *
3553 * These continuation pages are seldom referenced: the common paths all work
3554 * on the original swap_map, only referring to a continuation page when the
3555 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3556 *
3557 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3558 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3559 * can be called after dropping locks.
3560 */
3561int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3562{
3563	struct swap_info_struct *si;
3564	struct swap_cluster_info *ci;
3565	struct page *head;
3566	struct page *page;
3567	struct page *list_page;
3568	pgoff_t offset;
3569	unsigned char count;
3570	int ret = 0;
3571
3572	/*
3573	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3574	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3575	 */
3576	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3577
3578	si = get_swap_device(entry);
3579	if (!si) {
3580		/*
3581		 * An acceptable race has occurred since the failing
3582		 * __swap_duplicate(): the swap device may be swapoff
3583		 */
3584		goto outer;
3585	}
3586	spin_lock(&si->lock);
3587
3588	offset = swp_offset(entry);
3589
3590	ci = lock_cluster(si, offset);
3591
3592	count = swap_count(si->swap_map[offset]);
3593
3594	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3595		/*
3596		 * The higher the swap count, the more likely it is that tasks
3597		 * will race to add swap count continuation: we need to avoid
3598		 * over-provisioning.
3599		 */
3600		goto out;
3601	}
3602
3603	if (!page) {
3604		ret = -ENOMEM;
3605		goto out;
3606	}
3607
3608	/*
3609	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3610	 * no architecture is using highmem pages for kernel page tables: so it
3611	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3612	 */
3613	head = vmalloc_to_page(si->swap_map + offset);
3614	offset &= ~PAGE_MASK;
3615
3616	spin_lock(&si->cont_lock);
3617	/*
3618	 * Page allocation does not initialize the page's lru field,
3619	 * but it does always reset its private field.
3620	 */
3621	if (!page_private(head)) {
3622		BUG_ON(count & COUNT_CONTINUED);
3623		INIT_LIST_HEAD(&head->lru);
3624		set_page_private(head, SWP_CONTINUED);
3625		si->flags |= SWP_CONTINUED;
3626	}
3627
3628	list_for_each_entry(list_page, &head->lru, lru) {
3629		unsigned char *map;
3630
3631		/*
3632		 * If the previous map said no continuation, but we've found
3633		 * a continuation page, free our allocation and use this one.
3634		 */
3635		if (!(count & COUNT_CONTINUED))
3636			goto out_unlock_cont;
3637
3638		map = kmap_atomic(list_page) + offset;
3639		count = *map;
3640		kunmap_atomic(map);
3641
3642		/*
3643		 * If this continuation count now has some space in it,
3644		 * free our allocation and use this one.
3645		 */
3646		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3647			goto out_unlock_cont;
3648	}
3649
3650	list_add_tail(&page->lru, &head->lru);
3651	page = NULL;			/* now it's attached, don't free it */
3652out_unlock_cont:
3653	spin_unlock(&si->cont_lock);
3654out:
3655	unlock_cluster(ci);
3656	spin_unlock(&si->lock);
3657	put_swap_device(si);
3658outer:
3659	if (page)
3660		__free_page(page);
3661	return ret;
3662}
3663
3664/*
3665 * swap_count_continued - when the original swap_map count is incremented
3666 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3667 * into, carry if so, or else fail until a new continuation page is allocated;
3668 * when the original swap_map count is decremented from 0 with continuation,
3669 * borrow from the continuation and report whether it still holds more.
3670 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3671 * lock.
3672 */
3673static bool swap_count_continued(struct swap_info_struct *si,
3674				 pgoff_t offset, unsigned char count)
3675{
3676	struct page *head;
3677	struct page *page;
3678	unsigned char *map;
3679	bool ret;
3680
3681	head = vmalloc_to_page(si->swap_map + offset);
3682	if (page_private(head) != SWP_CONTINUED) {
3683		BUG_ON(count & COUNT_CONTINUED);
3684		return false;		/* need to add count continuation */
3685	}
3686
3687	spin_lock(&si->cont_lock);
3688	offset &= ~PAGE_MASK;
3689	page = list_next_entry(head, lru);
3690	map = kmap_atomic(page) + offset;
3691
3692	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
3693		goto init_map;		/* jump over SWAP_CONT_MAX checks */
3694
3695	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3696		/*
3697		 * Think of how you add 1 to 999
3698		 */
3699		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3700			kunmap_atomic(map);
3701			page = list_next_entry(page, lru);
3702			BUG_ON(page == head);
3703			map = kmap_atomic(page) + offset;
3704		}
3705		if (*map == SWAP_CONT_MAX) {
3706			kunmap_atomic(map);
3707			page = list_next_entry(page, lru);
3708			if (page == head) {
3709				ret = false;	/* add count continuation */
3710				goto out;
3711			}
3712			map = kmap_atomic(page) + offset;
3713init_map:		*map = 0;		/* we didn't zero the page */
3714		}
3715		*map += 1;
3716		kunmap_atomic(map);
3717		while ((page = list_prev_entry(page, lru)) != head) {
3718			map = kmap_atomic(page) + offset;
3719			*map = COUNT_CONTINUED;
3720			kunmap_atomic(map);
3721		}
3722		ret = true;			/* incremented */
3723
3724	} else {				/* decrementing */
3725		/*
3726		 * Think of how you subtract 1 from 1000
3727		 */
3728		BUG_ON(count != COUNT_CONTINUED);
3729		while (*map == COUNT_CONTINUED) {
3730			kunmap_atomic(map);
3731			page = list_next_entry(page, lru);
3732			BUG_ON(page == head);
3733			map = kmap_atomic(page) + offset;
3734		}
3735		BUG_ON(*map == 0);
3736		*map -= 1;
3737		if (*map == 0)
3738			count = 0;
3739		kunmap_atomic(map);
3740		while ((page = list_prev_entry(page, lru)) != head) {
3741			map = kmap_atomic(page) + offset;
3742			*map = SWAP_CONT_MAX | count;
3743			count = COUNT_CONTINUED;
3744			kunmap_atomic(map);
3745		}
3746		ret = count == COUNT_CONTINUED;
3747	}
3748out:
3749	spin_unlock(&si->cont_lock);
3750	return ret;
3751}
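
The carry/borrow logic above is easiest to see as multi-digit arithmetic: the swap_map byte is the lowest digit (saturating at SWAP_MAP_MAX) and each continuation page supplies one more digit of up to SWAP_CONT_MAX. A standalone sketch with tiny made-up digit limits so the carries are visible:

/* Sketch of the continuation-count arithmetic; DIGIT_MAX stands in for
 * SWAP_CONT_MAX and the array stands in for the chain of continuation
 * pages hanging off a single swap_map byte. */
#include <stdio.h>

#define DIGIT_MAX	9	/* assumption: the real SWAP_CONT_MAX is larger */
#define NDIGITS		4

static unsigned char digit[NDIGITS];	/* digit[0] = first continuation */

static void carry_in(void)		/* like the "incrementing" branch */
{
	for (int i = 0; i < NDIGITS; i++) {
		if (digit[i] < DIGIT_MAX) {
			digit[i]++;	/* room in this digit: done */
			return;
		}
		digit[i] = 0;		/* 9 -> 0, carry into the next digit */
	}
}

static void borrow_out(void)		/* like the "decrementing" branch */
{
	for (int i = 0; i < NDIGITS; i++) {
		if (digit[i]) {
			digit[i]--;	/* found a non-zero digit: borrow done */
			return;
		}
		digit[i] = DIGIT_MAX;	/* 0 -> 9, keep borrowing upward */
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++)	/* "add 1 to 999", many times over */
		carry_in();
	printf("after 1000 carries: %d%d%d%d\n",
	       digit[3], digit[2], digit[1], digit[0]);
	borrow_out();
	printf("after one borrow:   %d%d%d%d\n",
	       digit[3], digit[2], digit[1], digit[0]);
	return 0;
}
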
3752
3753/*
3754 * free_swap_count_continuations - swapoff free all the continuation pages
3755 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3756 */
3757static void free_swap_count_continuations(struct swap_info_struct *si)
3758{
3759	pgoff_t offset;
3760
3761	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3762		struct page *head;
3763		head = vmalloc_to_page(si->swap_map + offset);
3764		if (page_private(head)) {
3765			struct page *page, *next;
3766
3767			list_for_each_entry_safe(page, next, &head->lru, lru) {
3768				list_del(&page->lru);
3769				__free_page(page);
3770			}
3771		}
3772	}
3773}
3774
3775#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3776void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3777{
3778	struct swap_info_struct *si, *next;
3779	int nid = page_to_nid(page);
3780
3781	if (!(gfp_mask & __GFP_IO))
3782		return;
3783
3784	if (!blk_cgroup_congested())
3785		return;
3786
3787	/*
3788	 * We've already scheduled a throttle, avoid taking the global swap
3789	 * lock.
3790	 */
3791	if (current->throttle_queue)
3792		return;
3793
3794	spin_lock(&swap_avail_lock);
3795	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3796				  avail_lists[nid]) {
3797		if (si->bdev) {
3798			blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
3799			break;
3800		}
3801	}
3802	spin_unlock(&swap_avail_lock);
3803}
3804#endif
3805
3806static int __init swapfile_init(void)
3807{
3808	int nid;
3809
3810	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3811					 GFP_KERNEL);
3812	if (!swap_avail_heads) {
3813		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3814		return -ENOMEM;
3815	}
3816
3817	for_each_node(nid)
3818		plist_head_init(&swap_avail_heads[nid]);
3819
3820	return 0;
3821}
3822subsys_initcall(swapfile_init);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/swapfile.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *  Swap reorganised 29.12.95, Stephen Tweedie
   7 */
   8
   9#include <linux/blkdev.h>
  10#include <linux/mm.h>
  11#include <linux/sched/mm.h>
  12#include <linux/sched/task.h>
  13#include <linux/hugetlb.h>
  14#include <linux/mman.h>
  15#include <linux/slab.h>
  16#include <linux/kernel_stat.h>
  17#include <linux/swap.h>
  18#include <linux/vmalloc.h>
  19#include <linux/pagemap.h>
  20#include <linux/namei.h>
  21#include <linux/shmem_fs.h>
  22#include <linux/blk-cgroup.h>
  23#include <linux/random.h>
  24#include <linux/writeback.h>
  25#include <linux/proc_fs.h>
  26#include <linux/seq_file.h>
  27#include <linux/init.h>
  28#include <linux/ksm.h>
  29#include <linux/rmap.h>
  30#include <linux/security.h>
  31#include <linux/backing-dev.h>
  32#include <linux/mutex.h>
  33#include <linux/capability.h>
  34#include <linux/syscalls.h>
  35#include <linux/memcontrol.h>
  36#include <linux/poll.h>
  37#include <linux/oom.h>
  38#include <linux/frontswap.h>
  39#include <linux/swapfile.h>
  40#include <linux/export.h>
  41#include <linux/swap_slots.h>
  42#include <linux/sort.h>
  43#include <linux/completion.h>
  44
  45#include <asm/tlbflush.h>
  46#include <linux/swapops.h>
  47#include <linux/swap_cgroup.h>
  48#include "swap.h"
  49
  50static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  51				 unsigned char);
  52static void free_swap_count_continuations(struct swap_info_struct *);
  53
  54static DEFINE_SPINLOCK(swap_lock);
  55static unsigned int nr_swapfiles;
  56atomic_long_t nr_swap_pages;
  57/*
  58 * Some modules use swappable objects and may try to swap them out under
  59 * memory pressure (via the shrinker). Before doing so, they may wish to
  60 * check to see if any swap space is available.
  61 */
  62EXPORT_SYMBOL_GPL(nr_swap_pages);
  63/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  64long total_swap_pages;
  65static int least_priority = -1;
  66unsigned long swapfile_maximum_size;
  67#ifdef CONFIG_MIGRATION
  68bool swap_migration_ad_supported;
  69#endif	/* CONFIG_MIGRATION */
  70
  71static const char Bad_file[] = "Bad swap file entry ";
  72static const char Unused_file[] = "Unused swap file entry ";
  73static const char Bad_offset[] = "Bad swap offset entry ";
  74static const char Unused_offset[] = "Unused swap offset entry ";
  75
  76/*
  77 * all active swap_info_structs
  78 * protected with swap_lock, and ordered by priority.
  79 */
  80static PLIST_HEAD(swap_active_head);
  81
  82/*
  83 * all available (active, not full) swap_info_structs
  84 * protected with swap_avail_lock, ordered by priority.
  85 * This is used by folio_alloc_swap() instead of swap_active_head
  86 * because swap_active_head includes all swap_info_structs,
  87 * but folio_alloc_swap() doesn't need to look at full ones.
  88 * This uses its own lock instead of swap_lock because when a
  89 * swap_info_struct changes between not-full/full, it needs to
  90 * add/remove itself to/from this list, but the swap_info_struct->lock
  91 * is held and the locking order requires swap_lock to be taken
  92 * before any swap_info_struct->lock.
  93 */
  94static struct plist_head *swap_avail_heads;
  95static DEFINE_SPINLOCK(swap_avail_lock);
  96
  97struct swap_info_struct *swap_info[MAX_SWAPFILES];
  98
  99static DEFINE_MUTEX(swapon_mutex);
 100
 101static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
 102/* Activity counter to indicate that a swapon or swapoff has occurred */
 103static atomic_t proc_poll_event = ATOMIC_INIT(0);
 104
 105atomic_t nr_rotate_swap = ATOMIC_INIT(0);
 106
 107static struct swap_info_struct *swap_type_to_swap_info(int type)
 108{
 109	if (type >= MAX_SWAPFILES)
 110		return NULL;
 111
 112	return READ_ONCE(swap_info[type]); /* rcu_dereference() */
 113}
 114
 115static inline unsigned char swap_count(unsigned char ent)
 116{
 117	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
 118}
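
/*
 * Rough map of what a swap_map byte can hold, assuming the usual values
 * from <linux/swap.h> (SWAP_HAS_CACHE == 0x40, COUNT_CONTINUED == 0x80,
 * SWAP_MAP_MAX == 0x3e, SWAP_MAP_BAD == 0x3f, SWAP_MAP_SHMEM == 0xbf):
 *
 *	0x00	free slot
 *	0x02	two page table references, no swap cache page
 *	0x41	one page table reference, page also in swap cache
 *	0x40	no page table references, slot held by the swap cache only
 *	0xbf	slot owned by shmem/tmpfs
 *	0x3f	bad slot, never allocated
 *
 * swap_count() above strips SWAP_HAS_CACHE, so it returns only the map
 * references, possibly still tagged with COUNT_CONTINUED.
 */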
 119
 120/* Reclaim the swap entry anyway if possible */
 121#define TTRS_ANYWAY		0x1
 122/*
 123 * Reclaim the swap entry if there are no more mappings of the
 124 * corresponding page
 125 */
 126#define TTRS_UNMAPPED		0x2
 127/* Reclaim the swap entry if swap is getting full */
 128#define TTRS_FULL		0x4
 129
 130/* returns 1 if swap entry is freed */
 131static int __try_to_reclaim_swap(struct swap_info_struct *si,
 132				 unsigned long offset, unsigned long flags)
 133{
 134	swp_entry_t entry = swp_entry(si->type, offset);
 135	struct folio *folio;
 136	int ret = 0;
 137
 138	folio = filemap_get_folio(swap_address_space(entry), offset);
 139	if (!folio)
 140		return 0;
 141	/*
 142	 * This function is called from scan_swap_map_slots(), which can be
 143	 * reached from vmscan.c while reclaiming folios, so a folio lock may
 144	 * already be held. We have to use trylock to avoid deadlock. This is
 145	 * a special case; in usual operations, use folio_free_swap() with an
 146	 * explicit folio_lock().
 147	 */
 148	if (folio_trylock(folio)) {
 149		if ((flags & TTRS_ANYWAY) ||
 150		    ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
 151		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
 152			ret = folio_free_swap(folio);
 153		folio_unlock(folio);
 154	}
 155	folio_put(folio);
 156	return ret;
 157}
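
/*
 * Callers choose TTRS_* flags to say how eager the reclaim should be:
 * scan_swap_map_slots() below reuses a cache-only slot with TTRS_ANYWAY,
 * while free_swap_and_cache() passes TTRS_UNMAPPED | TTRS_FULL so the
 * folio is only dropped when it has no mappings left or swap is
 * getting full.
 */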
 158
 159static inline struct swap_extent *first_se(struct swap_info_struct *sis)
 160{
 161	struct rb_node *rb = rb_first(&sis->swap_extent_root);
 162	return rb_entry(rb, struct swap_extent, rb_node);
 163}
 164
 165static inline struct swap_extent *next_se(struct swap_extent *se)
 166{
 167	struct rb_node *rb = rb_next(&se->rb_node);
 168	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
 169}
 170
 171/*
 172 * swapon tells the device that all the old swap contents can be discarded,
 173 * to allow the swap device to optimize its wear-levelling.
 174 */
 175static int discard_swap(struct swap_info_struct *si)
 176{
 177	struct swap_extent *se;
 178	sector_t start_block;
 179	sector_t nr_blocks;
 180	int err = 0;
 181
 182	/* Do not discard the swap header page! */
 183	se = first_se(si);
 184	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
 185	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 186	if (nr_blocks) {
 187		err = blkdev_issue_discard(si->bdev, start_block,
 188				nr_blocks, GFP_KERNEL);
 189		if (err)
 190			return err;
 191		cond_resched();
 192	}
 193
 194	for (se = next_se(se); se; se = next_se(se)) {
 195		start_block = se->start_block << (PAGE_SHIFT - 9);
 196		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 197
 198		err = blkdev_issue_discard(si->bdev, start_block,
 199				nr_blocks, GFP_KERNEL);
 200		if (err)
 201			break;
 202
 203		cond_resched();
 204	}
 205	return err;		/* That will often be -EOPNOTSUPP */
 206}
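
/*
 * The "<< (PAGE_SHIFT - 9)" shifts above convert page numbers into
 * 512-byte sector numbers for the block layer.  For example, with 4 KiB
 * pages (PAGE_SHIFT == 12) one page is 8 sectors, so swap page 10 starts
 * at sector 80 and an extent of 3 pages spans 24 sectors.
 */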
 207
 208static struct swap_extent *
 209offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
 210{
 211	struct swap_extent *se;
 212	struct rb_node *rb;
 213
 214	rb = sis->swap_extent_root.rb_node;
 215	while (rb) {
 216		se = rb_entry(rb, struct swap_extent, rb_node);
 217		if (offset < se->start_page)
 218			rb = rb->rb_left;
 219		else if (offset >= se->start_page + se->nr_pages)
 220			rb = rb->rb_right;
 221		else
 222			return se;
 223	}
 224	/* It *must* be present */
 225	BUG();
 226}
 227
 228sector_t swap_page_sector(struct page *page)
 229{
 230	struct swap_info_struct *sis = page_swap_info(page);
 231	struct swap_extent *se;
 232	sector_t sector;
 233	pgoff_t offset;
 234
 235	offset = __page_file_index(page);
 236	se = offset_to_swap_extent(sis, offset);
 237	sector = se->start_block + (offset - se->start_page);
 238	return sector << (PAGE_SHIFT - 9);
 239}
 240
 241/*
 242 * swap allocation tells the device that a cluster of swap can now be discarded,
 243 * to allow the swap device to optimize its wear-levelling.
 244 */
 245static void discard_swap_cluster(struct swap_info_struct *si,
 246				 pgoff_t start_page, pgoff_t nr_pages)
 247{
 248	struct swap_extent *se = offset_to_swap_extent(si, start_page);
 249
 250	while (nr_pages) {
 251		pgoff_t offset = start_page - se->start_page;
 252		sector_t start_block = se->start_block + offset;
 253		sector_t nr_blocks = se->nr_pages - offset;
 254
 255		if (nr_blocks > nr_pages)
 256			nr_blocks = nr_pages;
 257		start_page += nr_blocks;
 258		nr_pages -= nr_blocks;
 259
 260		start_block <<= PAGE_SHIFT - 9;
 261		nr_blocks <<= PAGE_SHIFT - 9;
 262		if (blkdev_issue_discard(si->bdev, start_block,
 263					nr_blocks, GFP_NOIO))
 264			break;
 265
 266		se = next_se(se);
 267	}
 268}
 269
 270#ifdef CONFIG_THP_SWAP
 271#define SWAPFILE_CLUSTER	HPAGE_PMD_NR
 272
 273#define swap_entry_size(size)	(size)
 274#else
 275#define SWAPFILE_CLUSTER	256
 276
 277/*
 278 * Define swap_entry_size() as a constant to let the compiler optimize
 279 * out some code if !CONFIG_THP_SWAP
 280 */
 281#define swap_entry_size(size)	1
 282#endif
 283#define LATENCY_LIMIT		256
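
/*
 * A cluster is SWAPFILE_CLUSTER consecutive swap slots, and slot "offset"
 * belongs to cluster "offset / SWAPFILE_CLUSTER".  With 4 KiB pages that
 * is 1 MiB of swap per cluster in the !CONFIG_THP_SWAP case; with
 * CONFIG_THP_SWAP a cluster matches one PMD-sized huge page, typically
 * HPAGE_PMD_NR == 512 slots (2 MiB) on x86-64.
 */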
 284
 285static inline void cluster_set_flag(struct swap_cluster_info *info,
 286	unsigned int flag)
 287{
 288	info->flags = flag;
 289}
 290
 291static inline unsigned int cluster_count(struct swap_cluster_info *info)
 292{
 293	return info->data;
 294}
 295
 296static inline void cluster_set_count(struct swap_cluster_info *info,
 297				     unsigned int c)
 298{
 299	info->data = c;
 300}
 301
 302static inline void cluster_set_count_flag(struct swap_cluster_info *info,
 303					 unsigned int c, unsigned int f)
 304{
 305	info->flags = f;
 306	info->data = c;
 307}
 308
 309static inline unsigned int cluster_next(struct swap_cluster_info *info)
 310{
 311	return info->data;
 312}
 313
 314static inline void cluster_set_next(struct swap_cluster_info *info,
 315				    unsigned int n)
 316{
 317	info->data = n;
 318}
 319
 320static inline void cluster_set_next_flag(struct swap_cluster_info *info,
 321					 unsigned int n, unsigned int f)
 322{
 323	info->flags = f;
 324	info->data = n;
 325}
 326
 327static inline bool cluster_is_free(struct swap_cluster_info *info)
 328{
 329	return info->flags & CLUSTER_FLAG_FREE;
 330}
 331
 332static inline bool cluster_is_null(struct swap_cluster_info *info)
 333{
 334	return info->flags & CLUSTER_FLAG_NEXT_NULL;
 335}
 336
 337static inline void cluster_set_null(struct swap_cluster_info *info)
 338{
 339	info->flags = CLUSTER_FLAG_NEXT_NULL;
 340	info->data = 0;
 341}
 342
 343static inline bool cluster_is_huge(struct swap_cluster_info *info)
 344{
 345	if (IS_ENABLED(CONFIG_THP_SWAP))
 346		return info->flags & CLUSTER_FLAG_HUGE;
 347	return false;
 348}
 349
 350static inline void cluster_clear_huge(struct swap_cluster_info *info)
 351{
 352	info->flags &= ~CLUSTER_FLAG_HUGE;
 353}
 354
 355static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
 356						     unsigned long offset)
 357{
 358	struct swap_cluster_info *ci;
 359
 360	ci = si->cluster_info;
 361	if (ci) {
 362		ci += offset / SWAPFILE_CLUSTER;
 363		spin_lock(&ci->lock);
 364	}
 365	return ci;
 366}
 367
 368static inline void unlock_cluster(struct swap_cluster_info *ci)
 369{
 370	if (ci)
 371		spin_unlock(&ci->lock);
 372}
 373
 374/*
 375 * Determine the locking method in use for this device.  Return
 376 * swap_cluster_info if SSD-style cluster-based locking is in place.
 377 */
 378static inline struct swap_cluster_info *lock_cluster_or_swap_info(
 379		struct swap_info_struct *si, unsigned long offset)
 380{
 381	struct swap_cluster_info *ci;
 382
 383	/* Try to use fine-grained SSD-style locking if available: */
 384	ci = lock_cluster(si, offset);
 385	/* Otherwise, fall back to traditional, coarse locking: */
 386	if (!ci)
 387		spin_lock(&si->lock);
 388
 389	return ci;
 390}
 391
 392static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
 393					       struct swap_cluster_info *ci)
 394{
 395	if (ci)
 396		unlock_cluster(ci);
 397	else
 398		spin_unlock(&si->lock);
 399}
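
/*
 * The usual pattern for touching a single swap_map entry, as used by
 * swap_swapcount() and others later in this file:
 *
 *	struct swap_cluster_info *ci;
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	count = swap_count(si->swap_map[offset]);
 *	unlock_cluster_or_swap_info(si, ci);
 *
 * On SSDs only one cluster is locked, so lookups in different clusters
 * can run in parallel; on rotational devices this degrades to taking
 * si->lock.
 */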
 400
 401static inline bool cluster_list_empty(struct swap_cluster_list *list)
 402{
 403	return cluster_is_null(&list->head);
 404}
 405
 406static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
 407{
 408	return cluster_next(&list->head);
 409}
 410
 411static void cluster_list_init(struct swap_cluster_list *list)
 412{
 413	cluster_set_null(&list->head);
 414	cluster_set_null(&list->tail);
 415}
 416
 417static void cluster_list_add_tail(struct swap_cluster_list *list,
 418				  struct swap_cluster_info *ci,
 419				  unsigned int idx)
 420{
 421	if (cluster_list_empty(list)) {
 422		cluster_set_next_flag(&list->head, idx, 0);
 423		cluster_set_next_flag(&list->tail, idx, 0);
 424	} else {
 425		struct swap_cluster_info *ci_tail;
 426		unsigned int tail = cluster_next(&list->tail);
 427
 428		/*
 429		 * Nested cluster lock, but both cluster locks are
 430		 * only acquired when we held swap_info_struct->lock
 431		 */
 432		ci_tail = ci + tail;
 433		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
 434		cluster_set_next(ci_tail, idx);
 435		spin_unlock(&ci_tail->lock);
 436		cluster_set_next_flag(&list->tail, idx, 0);
 437	}
 438}
 439
 440static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
 441					   struct swap_cluster_info *ci)
 442{
 443	unsigned int idx;
 444
 445	idx = cluster_next(&list->head);
 446	if (cluster_next(&list->tail) == idx) {
 447		cluster_set_null(&list->head);
 448		cluster_set_null(&list->tail);
 449	} else
 450		cluster_set_next_flag(&list->head,
 451				      cluster_next(&ci[idx]), 0);
 452
 453	return idx;
 454}
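
/*
 * These cluster lists are index based rather than pointer based:
 * list->head and list->tail hold cluster numbers, and each cluster's
 * "next" link lives in cluster_info[idx].data.  For example, a free list
 * holding clusters 3 -> 7 -> 9 has head.data == 3, tail.data == 9,
 * cluster_info[3].data == 7 and cluster_info[7].data == 9; deleting the
 * first element simply advances head to cluster_info[3].data.
 */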
 455
 456/* Add a cluster to discard list and schedule it to do discard */
 457static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 458		unsigned int idx)
 459{
 460	/*
 461	 * If scan_swap_map_slots() can't find a free cluster, it will check
 462	 * si->swap_map directly. To make sure a cluster that is being discarded
 463	 * isn't taken by scan_swap_map_slots(), mark its swap entries bad
 464	 * (occupied). They will be cleared after the discard finishes.
 465	 */
 466	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 467			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
 468
 469	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
 470
 471	schedule_work(&si->discard_work);
 472}
 473
 474static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
 475{
 476	struct swap_cluster_info *ci = si->cluster_info;
 477
 478	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
 479	cluster_list_add_tail(&si->free_clusters, ci, idx);
 480}
 481
 482/*
 483 * Perform the scheduled discards. Once a cluster discard is finished, the
 484 * cluster is added to the free cluster list. The caller should hold si->lock.
 485 */
 486static void swap_do_scheduled_discard(struct swap_info_struct *si)
 487{
 488	struct swap_cluster_info *info, *ci;
 489	unsigned int idx;
 490
 491	info = si->cluster_info;
 492
 493	while (!cluster_list_empty(&si->discard_clusters)) {
 494		idx = cluster_list_del_first(&si->discard_clusters, info);
 495		spin_unlock(&si->lock);
 496
 497		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
 498				SWAPFILE_CLUSTER);
 499
 500		spin_lock(&si->lock);
 501		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
 502		__free_cluster(si, idx);
 503		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 504				0, SWAPFILE_CLUSTER);
 505		unlock_cluster(ci);
 506	}
 507}
 508
 509static void swap_discard_work(struct work_struct *work)
 510{
 511	struct swap_info_struct *si;
 512
 513	si = container_of(work, struct swap_info_struct, discard_work);
 514
 515	spin_lock(&si->lock);
 516	swap_do_scheduled_discard(si);
 517	spin_unlock(&si->lock);
 518}
 519
 520static void swap_users_ref_free(struct percpu_ref *ref)
 521{
 522	struct swap_info_struct *si;
 523
 524	si = container_of(ref, struct swap_info_struct, users);
 525	complete(&si->comp);
 526}
 527
 528static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
 529{
 530	struct swap_cluster_info *ci = si->cluster_info;
 531
 532	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
 533	cluster_list_del_first(&si->free_clusters, ci);
 534	cluster_set_count_flag(ci + idx, 0, 0);
 535}
 536
 537static void free_cluster(struct swap_info_struct *si, unsigned long idx)
 538{
 539	struct swap_cluster_info *ci = si->cluster_info + idx;
 540
 541	VM_BUG_ON(cluster_count(ci) != 0);
 542	/*
 543	 * If the swap is discardable, schedule a discard of the cluster
 544	 * instead of freeing it immediately. The cluster will be freed
 545	 * after discard.
 546	 */
 547	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
 548	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
 549		swap_cluster_schedule_discard(si, idx);
 550		return;
 551	}
 552
 553	__free_cluster(si, idx);
 554}
 555
 556/*
 557 * The cluster corresponding to page_nr will be used. The cluster will be
 558 * removed from free cluster list and its usage counter will be increased.
 559 */
 560static void inc_cluster_info_page(struct swap_info_struct *p,
 561	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 562{
 563	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 564
 565	if (!cluster_info)
 566		return;
 567	if (cluster_is_free(&cluster_info[idx]))
 568		alloc_cluster(p, idx);
 569
 570	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
 571	cluster_set_count(&cluster_info[idx],
 572		cluster_count(&cluster_info[idx]) + 1);
 573}
 574
 575/*
 576 * The usage counter of the cluster corresponding to page_nr is decreased by
 577 * one. If the counter becomes 0, meaning no page in the cluster is in use, we
 578 * can optionally discard the cluster and add it to the free cluster list.
 579 */
 580static void dec_cluster_info_page(struct swap_info_struct *p,
 581	struct swap_cluster_info *cluster_info, unsigned long page_nr)
 582{
 583	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
 584
 585	if (!cluster_info)
 586		return;
 587
 588	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
 589	cluster_set_count(&cluster_info[idx],
 590		cluster_count(&cluster_info[idx]) - 1);
 591
 592	if (cluster_count(&cluster_info[idx]) == 0)
 593		free_cluster(p, idx);
 594}
 595
 596/*
 597 * It's possible for scan_swap_map_slots() to use a free cluster in the middle
 598 * of the free cluster list. Avoid such abuse to prevent list corruption.
 599 */
 600static bool
 601scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
 602	unsigned long offset)
 603{
 604	struct percpu_cluster *percpu_cluster;
 605	bool conflict;
 606
 607	offset /= SWAPFILE_CLUSTER;
 608	conflict = !cluster_list_empty(&si->free_clusters) &&
 609		offset != cluster_list_first(&si->free_clusters) &&
 610		cluster_is_free(&si->cluster_info[offset]);
 611
 612	if (!conflict)
 613		return false;
 614
 615	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
 616	cluster_set_null(&percpu_cluster->index);
 617	return true;
 618}
 619
 620/*
 621 * Try to get a swap entry from the current cpu's swap entry pool (a cluster).
 622 * This might involve allocating a new cluster for the current CPU too.
 623 */
 624static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 625	unsigned long *offset, unsigned long *scan_base)
 626{
 627	struct percpu_cluster *cluster;
 628	struct swap_cluster_info *ci;
 629	unsigned long tmp, max;
 630
 631new_cluster:
 632	cluster = this_cpu_ptr(si->percpu_cluster);
 633	if (cluster_is_null(&cluster->index)) {
 634		if (!cluster_list_empty(&si->free_clusters)) {
 635			cluster->index = si->free_clusters.head;
 636			cluster->next = cluster_next(&cluster->index) *
 637					SWAPFILE_CLUSTER;
 638		} else if (!cluster_list_empty(&si->discard_clusters)) {
 639			/*
 640			 * we have no free cluster but some clusters are being
 641			 * discarded; do the discard now and reclaim them, then
 642			 * reread cluster_next_cpu since we dropped si->lock
 643			 */
 644			swap_do_scheduled_discard(si);
 645			*scan_base = this_cpu_read(*si->cluster_next_cpu);
 646			*offset = *scan_base;
 647			goto new_cluster;
 648		} else
 649			return false;
 650	}
 651
 652	/*
 653	 * Other CPUs can use our cluster if they can't find a free cluster, so
 654	 * check whether there is still a free entry in the cluster.
 655	 */
 656	tmp = cluster->next;
 657	max = min_t(unsigned long, si->max,
 658		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
 659	if (tmp < max) {
 660		ci = lock_cluster(si, tmp);
 661		while (tmp < max) {
 662			if (!si->swap_map[tmp])
 663				break;
 664			tmp++;
 665		}
 666		unlock_cluster(ci);
 667	}
 668	if (tmp >= max) {
 669		cluster_set_null(&cluster->index);
 670		goto new_cluster;
 671	}
 672	cluster->next = tmp + 1;
 673	*offset = tmp;
 674	*scan_base = tmp;
 675	return true;
 676}
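
/*
 * Example of the per-cpu cluster at work, assuming SWAPFILE_CLUSTER is
 * 256: if this CPU owns cluster 5, cluster->index refers to cluster 5
 * and cluster->next is the next slot to try within slots 1280..1535.
 * The loop above only hands back a slot whose swap_map byte is still
 * zero, because other CPUs may steal entries from our cluster when the
 * free list is empty; once next reaches the cluster end, this CPU grabs
 * a fresh free cluster or lets the caller fall back to the slow scan.
 */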
 677
 678static void __del_from_avail_list(struct swap_info_struct *p)
 679{
 680	int nid;
 681
 682	for_each_node(nid)
 683		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
 684}
 685
 686static void del_from_avail_list(struct swap_info_struct *p)
 687{
 688	spin_lock(&swap_avail_lock);
 689	__del_from_avail_list(p);
 690	spin_unlock(&swap_avail_lock);
 691}
 692
 693static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
 694			     unsigned int nr_entries)
 695{
 696	unsigned int end = offset + nr_entries - 1;
 697
 698	if (offset == si->lowest_bit)
 699		si->lowest_bit += nr_entries;
 700	if (end == si->highest_bit)
 701		WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
 702	WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
 703	if (si->inuse_pages == si->pages) {
 704		si->lowest_bit = si->max;
 705		si->highest_bit = 0;
 706		del_from_avail_list(si);
 707	}
 708}
 709
 710static void add_to_avail_list(struct swap_info_struct *p)
 711{
 712	int nid;
 713
 714	spin_lock(&swap_avail_lock);
 715	for_each_node(nid) {
 716		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
 717		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
 718	}
 719	spin_unlock(&swap_avail_lock);
 720}
 721
 722static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 723			    unsigned int nr_entries)
 724{
 725	unsigned long begin = offset;
 726	unsigned long end = offset + nr_entries - 1;
 727	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 728
 729	if (offset < si->lowest_bit)
 730		si->lowest_bit = offset;
 731	if (end > si->highest_bit) {
 732		bool was_full = !si->highest_bit;
 733
 734		WRITE_ONCE(si->highest_bit, end);
 735		if (was_full && (si->flags & SWP_WRITEOK))
 736			add_to_avail_list(si);
 737	}
 738	atomic_long_add(nr_entries, &nr_swap_pages);
 739	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
 740	if (si->flags & SWP_BLKDEV)
 741		swap_slot_free_notify =
 742			si->bdev->bd_disk->fops->swap_slot_free_notify;
 743	else
 744		swap_slot_free_notify = NULL;
 745	while (offset <= end) {
 746		arch_swap_invalidate_page(si->type, offset);
 747		frontswap_invalidate_page(si->type, offset);
 748		if (swap_slot_free_notify)
 749			swap_slot_free_notify(si->bdev, offset);
 750		offset++;
 751	}
 752	clear_shadow_from_swap_cache(si->type, begin, end);
 753}
 754
 755static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
 756{
 757	unsigned long prev;
 758
 759	if (!(si->flags & SWP_SOLIDSTATE)) {
 760		si->cluster_next = next;
 761		return;
 762	}
 763
 764	prev = this_cpu_read(*si->cluster_next_cpu);
 765	/*
 766	 * When crossing into a different swap-address-space-size aligned
 767	 * chunk, choose another chunk at random to avoid lock contention
 768	 * on the swap address space if possible.
 769	 */
 770	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
 771	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
 772		/* No free swap slots available */
 773		if (si->highest_bit <= si->lowest_bit)
 774			return;
 775		next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
 776		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
 777		next = max_t(unsigned int, next, si->lowest_bit);
 778	}
 779	this_cpu_write(*si->cluster_next_cpu, next);
 780}
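
/*
 * SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT, normally 2^14
 * slots, i.e. 64 MiB of swap with 4 KiB pages) is the granularity at
 * which the swap cache is split into separate address spaces.  The
 * randomisation above only kicks in when the next position would cross
 * into a different such chunk, so that CPUs tend to spread over several
 * swap address spaces and their locks instead of piling onto one.
 */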
 781
 782static bool swap_offset_available_and_locked(struct swap_info_struct *si,
 783					     unsigned long offset)
 784{
 785	if (data_race(!si->swap_map[offset])) {
 786		spin_lock(&si->lock);
 787		return true;
 788	}
 789
 790	if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
 791		spin_lock(&si->lock);
 792		return true;
 793	}
 794
 795	return false;
 796}
 797
 798static int scan_swap_map_slots(struct swap_info_struct *si,
 799			       unsigned char usage, int nr,
 800			       swp_entry_t slots[])
 801{
 802	struct swap_cluster_info *ci;
 803	unsigned long offset;
 804	unsigned long scan_base;
 805	unsigned long last_in_cluster = 0;
 806	int latency_ration = LATENCY_LIMIT;
 807	int n_ret = 0;
 808	bool scanned_many = false;
 809
 810	/*
 811	 * We try to cluster swap pages by allocating them sequentially
 812	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
 813	 * way, however, we resort to first-free allocation, starting
 814	 * a new cluster.  This prevents us from scattering swap pages
 815	 * all over the entire swap partition, so that we reduce
 816	 * overall disk seek times between swap pages.  -- sct
 817	 * But we do now try to find an empty cluster.  -Andrea
 818	 * And we let swap pages go all over an SSD partition.  Hugh
 819	 */
 820
 821	si->flags += SWP_SCANNING;
 822	/*
 823	 * Use percpu scan base for SSD to reduce lock contention on
 824	 * cluster and swap cache.  For HDD, sequential access is more
 825	 * important.
 826	 */
 827	if (si->flags & SWP_SOLIDSTATE)
 828		scan_base = this_cpu_read(*si->cluster_next_cpu);
 829	else
 830		scan_base = si->cluster_next;
 831	offset = scan_base;
 832
 833	/* SSD algorithm */
 834	if (si->cluster_info) {
 835		if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 836			goto scan;
 837	} else if (unlikely(!si->cluster_nr--)) {
 838		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
 839			si->cluster_nr = SWAPFILE_CLUSTER - 1;
 840			goto checks;
 841		}
 842
 843		spin_unlock(&si->lock);
 844
 845		/*
 846		 * If seek is expensive, start searching for new cluster from
 847		 * start of partition, to minimize the span of allocated swap.
 848		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
 849		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
 850		 */
 851		scan_base = offset = si->lowest_bit;
 852		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
 853
 854		/* Locate the first empty (unaligned) cluster */
 855		for (; last_in_cluster <= si->highest_bit; offset++) {
 856			if (si->swap_map[offset])
 857				last_in_cluster = offset + SWAPFILE_CLUSTER;
 858			else if (offset == last_in_cluster) {
 859				spin_lock(&si->lock);
 860				offset -= SWAPFILE_CLUSTER - 1;
 861				si->cluster_next = offset;
 862				si->cluster_nr = SWAPFILE_CLUSTER - 1;
 863				goto checks;
 864			}
 865			if (unlikely(--latency_ration < 0)) {
 866				cond_resched();
 867				latency_ration = LATENCY_LIMIT;
 868			}
 869		}
 870
 871		offset = scan_base;
 872		spin_lock(&si->lock);
 873		si->cluster_nr = SWAPFILE_CLUSTER - 1;
 874	}
 875
 876checks:
 877	if (si->cluster_info) {
 878		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
 879		/* take a break if we already got some slots */
 880			if (n_ret)
 881				goto done;
 882			if (!scan_swap_map_try_ssd_cluster(si, &offset,
 883							&scan_base))
 884				goto scan;
 885		}
 886	}
 887	if (!(si->flags & SWP_WRITEOK))
 888		goto no_page;
 889	if (!si->highest_bit)
 890		goto no_page;
 891	if (offset > si->highest_bit)
 892		scan_base = offset = si->lowest_bit;
 893
 894	ci = lock_cluster(si, offset);
 895	/* reuse swap entry of cache-only swap if not busy. */
 896	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 897		int swap_was_freed;
 898		unlock_cluster(ci);
 899		spin_unlock(&si->lock);
 900		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
 901		spin_lock(&si->lock);
 902		/* entry was freed successfully, try to use this again */
 903		if (swap_was_freed)
 904			goto checks;
 905		goto scan; /* check next one */
 906	}
 907
 908	if (si->swap_map[offset]) {
 909		unlock_cluster(ci);
 910		if (!n_ret)
 911			goto scan;
 912		else
 913			goto done;
 914	}
 915	WRITE_ONCE(si->swap_map[offset], usage);
 916	inc_cluster_info_page(si, si->cluster_info, offset);
 917	unlock_cluster(ci);
 918
 919	swap_range_alloc(si, offset, 1);
 920	slots[n_ret++] = swp_entry(si->type, offset);
 921
 922	/* got enough slots or reach max slots? */
 923	if ((n_ret == nr) || (offset >= si->highest_bit))
 924		goto done;
 925
 926	/* search for next available slot */
 927
 928	/* time to take a break? */
 929	if (unlikely(--latency_ration < 0)) {
 930		if (n_ret)
 931			goto done;
 932		spin_unlock(&si->lock);
 933		cond_resched();
 934		spin_lock(&si->lock);
 935		latency_ration = LATENCY_LIMIT;
 936	}
 937
 938	/* try to get more slots in cluster */
 939	if (si->cluster_info) {
 940		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
 941			goto checks;
 942	} else if (si->cluster_nr && !si->swap_map[++offset]) {
 943		/* non-ssd case, still more slots in cluster? */
 944		--si->cluster_nr;
 945		goto checks;
 946	}
 947
 948	/*
 949	 * Even if there are no free clusters available (fragmented),
 950	 * try to scan a little more quickly with lock held unless we
 951	 * have scanned too many slots already.
 952	 */
 953	if (!scanned_many) {
 954		unsigned long scan_limit;
 955
 956		if (offset < scan_base)
 957			scan_limit = scan_base;
 958		else
 959			scan_limit = si->highest_bit;
 960		for (; offset <= scan_limit && --latency_ration > 0;
 961		     offset++) {
 962			if (!si->swap_map[offset])
 963				goto checks;
 964		}
 965	}
 966
 967done:
 968	set_cluster_next(si, offset + 1);
 969	si->flags -= SWP_SCANNING;
 970	return n_ret;
 971
 972scan:
 973	spin_unlock(&si->lock);
 974	while (++offset <= READ_ONCE(si->highest_bit)) {
 975		if (unlikely(--latency_ration < 0)) {
 976			cond_resched();
 977			latency_ration = LATENCY_LIMIT;
 978			scanned_many = true;
 979		}
 980		if (swap_offset_available_and_locked(si, offset))
 981			goto checks;
 982	}
 983	offset = si->lowest_bit;
 984	while (offset < scan_base) {
 985		if (unlikely(--latency_ration < 0)) {
 986			cond_resched();
 987			latency_ration = LATENCY_LIMIT;
 988			scanned_many = true;
 989		}
 990		if (swap_offset_available_and_locked(si, offset))
 991			goto checks;
 992		offset++;
 993	}
 994	spin_lock(&si->lock);
 995
 996no_page:
 997	si->flags -= SWP_SCANNING;
 998	return n_ret;
 999}
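
/*
 * Call contract, as exercised by get_swap_pages() below: the caller holds
 * si->lock, asks for up to nr slots and gets back how many were actually
 * reserved, each marked in swap_map with the given usage (normally
 * SWAP_HAS_CACHE):
 *
 *	spin_lock(&si->lock);
 *	n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, slots);
 *	spin_unlock(&si->lock);
 *
 * Note that the scan may itself drop and retake si->lock, so the device
 * can fill up or change underneath the caller.
 */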
1000
1001static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
1002{
1003	unsigned long idx;
1004	struct swap_cluster_info *ci;
1005	unsigned long offset;
1006
1007	/*
1008	 * Should not even be attempting cluster allocations when huge
1009	 * page swap is disabled.  Warn and fail the allocation.
1010	 */
1011	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1012		VM_WARN_ON_ONCE(1);
1013		return 0;
1014	}
1015
1016	if (cluster_list_empty(&si->free_clusters))
1017		return 0;
1018
1019	idx = cluster_list_first(&si->free_clusters);
1020	offset = idx * SWAPFILE_CLUSTER;
1021	ci = lock_cluster(si, offset);
1022	alloc_cluster(si, idx);
1023	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1024
1025	memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1026	unlock_cluster(ci);
1027	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1028	*slot = swp_entry(si->type, offset);
1029
1030	return 1;
1031}
1032
1033static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1034{
1035	unsigned long offset = idx * SWAPFILE_CLUSTER;
1036	struct swap_cluster_info *ci;
1037
1038	ci = lock_cluster(si, offset);
1039	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1040	cluster_set_count_flag(ci, 0, 0);
1041	free_cluster(si, idx);
1042	unlock_cluster(ci);
1043	swap_range_free(si, offset, SWAPFILE_CLUSTER);
1044}
1045
1046int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1047{
1048	unsigned long size = swap_entry_size(entry_size);
1049	struct swap_info_struct *si, *next;
1050	long avail_pgs;
1051	int n_ret = 0;
1052	int node;
1053
1054	/* Only single cluster request supported */
1055	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1056
1057	spin_lock(&swap_avail_lock);
1058
1059	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1060	if (avail_pgs <= 0) {
1061		spin_unlock(&swap_avail_lock);
1062		goto noswap;
1063	}
1064
1065	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1066
1067	atomic_long_sub(n_goal * size, &nr_swap_pages);
1068
1069start_over:
1070	node = numa_node_id();
1071	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1072		/* requeue si to after same-priority siblings */
1073		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1074		spin_unlock(&swap_avail_lock);
1075		spin_lock(&si->lock);
1076		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1077			spin_lock(&swap_avail_lock);
1078			if (plist_node_empty(&si->avail_lists[node])) {
1079				spin_unlock(&si->lock);
1080				goto nextsi;
1081			}
1082			WARN(!si->highest_bit,
1083			     "swap_info %d in list but !highest_bit\n",
1084			     si->type);
1085			WARN(!(si->flags & SWP_WRITEOK),
1086			     "swap_info %d in list but !SWP_WRITEOK\n",
1087			     si->type);
1088			__del_from_avail_list(si);
1089			spin_unlock(&si->lock);
1090			goto nextsi;
1091		}
1092		if (size == SWAPFILE_CLUSTER) {
1093			if (si->flags & SWP_BLKDEV)
1094				n_ret = swap_alloc_cluster(si, swp_entries);
1095		} else
1096			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1097						    n_goal, swp_entries);
1098		spin_unlock(&si->lock);
1099		if (n_ret || size == SWAPFILE_CLUSTER)
1100			goto check_out;
1101		pr_debug("scan_swap_map of si %d failed to find offset\n",
1102			si->type);
1103		cond_resched();
1104
1105		spin_lock(&swap_avail_lock);
1106nextsi:
1107		/*
1108		 * if we got here, it's likely that si was almost full before,
1109		 * and since scan_swap_map_slots() can drop the si->lock,
1110		 * multiple callers probably all tried to get a page from the
1111		 * same si and it filled up before we could get one; or, the si
1112		 * filled up between us dropping swap_avail_lock and taking
1113		 * si->lock. Since we dropped the swap_avail_lock, the
1114		 * swap_avail_head list may have been modified; so if next is
1115		 * still in the swap_avail_head list then try it, otherwise
1116		 * start over if we have not gotten any slots.
1117		 */
1118		if (plist_node_empty(&next->avail_lists[node]))
1119			goto start_over;
1120	}
1121
1122	spin_unlock(&swap_avail_lock);
1123
1124check_out:
1125	if (n_ret < n_goal)
1126		atomic_long_add((long)(n_goal - n_ret) * size,
1127				&nr_swap_pages);
1128noswap:
1129	return n_ret;
1130}
1131
1132static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1133{
1134	struct swap_info_struct *p;
1135	unsigned long offset;
1136
1137	if (!entry.val)
1138		goto out;
1139	p = swp_swap_info(entry);
1140	if (!p)
1141		goto bad_nofile;
1142	if (data_race(!(p->flags & SWP_USED)))
1143		goto bad_device;
1144	offset = swp_offset(entry);
1145	if (offset >= p->max)
1146		goto bad_offset;
1147	if (data_race(!p->swap_map[swp_offset(entry)]))
1148		goto bad_free;
1149	return p;
1150
1151bad_free:
1152	pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1153	goto out;
1154bad_offset:
1155	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1156	goto out;
1157bad_device:
1158	pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1159	goto out;
1160bad_nofile:
1161	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1162out:
1163	return NULL;
1164}
1165
1166static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1167					struct swap_info_struct *q)
1168{
1169	struct swap_info_struct *p;
1170
1171	p = _swap_info_get(entry);
1172
1173	if (p != q) {
1174		if (q != NULL)
1175			spin_unlock(&q->lock);
1176		if (p != NULL)
1177			spin_lock(&p->lock);
1178	}
1179	return p;
1180}
1181
1182static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1183					      unsigned long offset,
1184					      unsigned char usage)
1185{
1186	unsigned char count;
1187	unsigned char has_cache;
1188
1189	count = p->swap_map[offset];
1190
1191	has_cache = count & SWAP_HAS_CACHE;
1192	count &= ~SWAP_HAS_CACHE;
1193
1194	if (usage == SWAP_HAS_CACHE) {
1195		VM_BUG_ON(!has_cache);
1196		has_cache = 0;
1197	} else if (count == SWAP_MAP_SHMEM) {
1198		/*
1199		 * Or we could insist on shmem.c using a special
1200		 * swap_shmem_free() and free_shmem_swap_and_cache()...
1201		 */
1202		count = 0;
1203	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1204		if (count == COUNT_CONTINUED) {
1205			if (swap_count_continued(p, offset, count))
1206				count = SWAP_MAP_MAX | COUNT_CONTINUED;
1207			else
1208				count = SWAP_MAP_MAX;
1209		} else
1210			count--;
1211	}
1212
1213	usage = count | has_cache;
1214	if (usage)
1215		WRITE_ONCE(p->swap_map[offset], usage);
1216	else
1217		WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1218
1219	return usage;
1220}
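
/*
 * A few example transitions through the helper above, using the usual
 * <linux/swap.h> byte values (SWAP_HAS_CACHE == 0x40):
 *
 *	0x43 (3 refs + cache),  usage 1              -> 0x42
 *	0x41 (1 ref + cache),   usage 1              -> 0x40 (cache only)
 *	0x41 (1 ref + cache),   usage SWAP_HAS_CACHE -> 0x01 (ref only)
 *	0x01 (last ref),        usage 1              -> stored as SWAP_HAS_CACHE
 *
 * The last case is deliberate: when nothing is left the slot is parked as
 * SWAP_HAS_CACHE and 0 is returned, so the caller hands the entry to
 * free_swap_slot() and the slot cannot be reallocated before the swap
 * slots cache actually frees it.
 */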
1221
1222/*
1223 * Check whether swap entry is valid in the swap device.  If so,
1224 * return pointer to swap_info_struct, and keep the swap entry valid
1225 * via preventing the swap device from being swapoff, until
1226 * put_swap_device() is called.  Otherwise return NULL.
1227 *
1228 * Notice that swapoff or swapoff+swapon can still happen before the
1229 * percpu_ref_tryget_live() in get_swap_device() or after the
1230 * percpu_ref_put() in put_swap_device() if there isn't any other way
1231 * to prevent swapoff, such as page lock, page table lock, etc.  The
1232 * caller must be prepared for that.  For example, the following
1233 * situation is possible.
1234 *
1235 *   CPU1				CPU2
1236 *   do_swap_page()
1237 *     ...				swapoff+swapon
1238 *     __read_swap_cache_async()
1239 *       swapcache_prepare()
1240 *         __swap_duplicate()
1241 *           // check swap_map
1242 *     // verify PTE not changed
1243 *
1244 * In __swap_duplicate(), the swap_map needs to be checked before being
1245 * changed, partly because the specified swap entry may be for another
1246 * swap device which has been swapped off.  And in do_swap_page(), after
1247 * the page is read from the swap device, the PTE is verified not
1248 * changed with the page table locked to check whether the swap device
1249 * has been swapoff or swapoff+swapon.
1250 */
1251struct swap_info_struct *get_swap_device(swp_entry_t entry)
1252{
1253	struct swap_info_struct *si;
1254	unsigned long offset;
1255
1256	if (!entry.val)
1257		goto out;
1258	si = swp_swap_info(entry);
1259	if (!si)
1260		goto bad_nofile;
1261	if (!percpu_ref_tryget_live(&si->users))
1262		goto out;
1263	/*
1264	 * Guarantee the si->users are checked before accessing other
1265	 * fields of swap_info_struct.
1266	 *
1267	 * Paired with the spin_unlock() after setup_swap_info() in
1268	 * enable_swap_info().
1269	 */
1270	smp_rmb();
1271	offset = swp_offset(entry);
1272	if (offset >= si->max)
1273		goto put_out;
1274
1275	return si;
1276bad_nofile:
1277	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1278out:
1279	return NULL;
1280put_out:
1281	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1282	percpu_ref_put(&si->users);
1283	return NULL;
1284}
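
/*
 * The usual pattern, as in __swap_count() below: pin the device for the
 * duration of the access, then drop the reference.
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		count = swap_count(si->swap_map[swp_offset(entry)]);
 *		put_swap_device(si);
 *	}
 *
 * The reference only keeps the device from completing swapoff; it does
 * not keep the individual entry from being freed, so callers still need
 * the page lock, page table lock or similar to rely on its contents.
 */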
1285
1286static unsigned char __swap_entry_free(struct swap_info_struct *p,
1287				       swp_entry_t entry)
1288{
1289	struct swap_cluster_info *ci;
1290	unsigned long offset = swp_offset(entry);
1291	unsigned char usage;
1292
1293	ci = lock_cluster_or_swap_info(p, offset);
1294	usage = __swap_entry_free_locked(p, offset, 1);
1295	unlock_cluster_or_swap_info(p, ci);
1296	if (!usage)
1297		free_swap_slot(entry);
1298
1299	return usage;
1300}
1301
1302static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1303{
1304	struct swap_cluster_info *ci;
1305	unsigned long offset = swp_offset(entry);
1306	unsigned char count;
1307
1308	ci = lock_cluster(p, offset);
1309	count = p->swap_map[offset];
1310	VM_BUG_ON(count != SWAP_HAS_CACHE);
1311	p->swap_map[offset] = 0;
1312	dec_cluster_info_page(p, p->cluster_info, offset);
1313	unlock_cluster(ci);
1314
1315	mem_cgroup_uncharge_swap(entry, 1);
1316	swap_range_free(p, offset, 1);
1317}
1318
1319/*
1320 * Caller has made sure that the swap device corresponding to entry
1321 * is still around or has not been recycled.
1322 */
1323void swap_free(swp_entry_t entry)
1324{
1325	struct swap_info_struct *p;
1326
1327	p = _swap_info_get(entry);
1328	if (p)
1329		__swap_entry_free(p, entry);
1330}
1331
1332/*
1333 * Called after dropping swapcache to decrease refcnt to swap entries.
1334 */
1335void put_swap_folio(struct folio *folio, swp_entry_t entry)
1336{
1337	unsigned long offset = swp_offset(entry);
1338	unsigned long idx = offset / SWAPFILE_CLUSTER;
1339	struct swap_cluster_info *ci;
1340	struct swap_info_struct *si;
1341	unsigned char *map;
1342	unsigned int i, free_entries = 0;
1343	unsigned char val;
1344	int size = swap_entry_size(folio_nr_pages(folio));
1345
1346	si = _swap_info_get(entry);
1347	if (!si)
1348		return;
1349
1350	ci = lock_cluster_or_swap_info(si, offset);
1351	if (size == SWAPFILE_CLUSTER) {
1352		VM_BUG_ON(!cluster_is_huge(ci));
1353		map = si->swap_map + offset;
1354		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1355			val = map[i];
1356			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1357			if (val == SWAP_HAS_CACHE)
1358				free_entries++;
1359		}
1360		cluster_clear_huge(ci);
1361		if (free_entries == SWAPFILE_CLUSTER) {
1362			unlock_cluster_or_swap_info(si, ci);
1363			spin_lock(&si->lock);
1364			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1365			swap_free_cluster(si, idx);
1366			spin_unlock(&si->lock);
1367			return;
1368		}
1369	}
1370	for (i = 0; i < size; i++, entry.val++) {
1371		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1372			unlock_cluster_or_swap_info(si, ci);
1373			free_swap_slot(entry);
1374			if (i == size - 1)
1375				return;
1376			lock_cluster_or_swap_info(si, offset);
1377		}
1378	}
1379	unlock_cluster_or_swap_info(si, ci);
1380}
1381
1382#ifdef CONFIG_THP_SWAP
1383int split_swap_cluster(swp_entry_t entry)
1384{
1385	struct swap_info_struct *si;
1386	struct swap_cluster_info *ci;
1387	unsigned long offset = swp_offset(entry);
1388
1389	si = _swap_info_get(entry);
1390	if (!si)
1391		return -EBUSY;
1392	ci = lock_cluster(si, offset);
1393	cluster_clear_huge(ci);
1394	unlock_cluster(ci);
1395	return 0;
1396}
1397#endif
1398
1399static int swp_entry_cmp(const void *ent1, const void *ent2)
1400{
1401	const swp_entry_t *e1 = ent1, *e2 = ent2;
1402
1403	return (int)swp_type(*e1) - (int)swp_type(*e2);
1404}
1405
1406void swapcache_free_entries(swp_entry_t *entries, int n)
1407{
1408	struct swap_info_struct *p, *prev;
1409	int i;
1410
1411	if (n <= 0)
1412		return;
1413
1414	prev = NULL;
1415	p = NULL;
1416
1417	/*
1418	 * Sort swap entries by swap device, so each lock is only taken once.
1419	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1420	 * so low that it isn't necessary to optimize further.
1421	 */
1422	if (nr_swapfiles > 1)
1423		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1424	for (i = 0; i < n; ++i) {
1425		p = swap_info_get_cont(entries[i], prev);
1426		if (p)
1427			swap_entry_free(p, entries[i]);
1428		prev = p;
1429	}
1430	if (p)
1431		spin_unlock(&p->lock);
1432}
1433
1434int __swap_count(swp_entry_t entry)
1435{
1436	struct swap_info_struct *si;
1437	pgoff_t offset = swp_offset(entry);
1438	int count = 0;
1439
1440	si = get_swap_device(entry);
1441	if (si) {
1442		count = swap_count(si->swap_map[offset]);
1443		put_swap_device(si);
1444	}
1445	return count;
1446}
1447
1448/*
1449 * How many references to @entry are currently swapped out?
1450 * This does not give an exact answer when swap count is continued,
1451 * but does include the high COUNT_CONTINUED flag to allow for that.
1452 */
1453static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1454{
1455	pgoff_t offset = swp_offset(entry);
1456	struct swap_cluster_info *ci;
1457	int count;
1458
1459	ci = lock_cluster_or_swap_info(si, offset);
1460	count = swap_count(si->swap_map[offset]);
1461	unlock_cluster_or_swap_info(si, ci);
1462	return count;
1463}
1464
1465/*
1466 * How many references to @entry are currently swapped out?
1467 * This does not give an exact answer when swap count is continued,
1468 * but does include the high COUNT_CONTINUED flag to allow for that.
1469 */
1470int __swp_swapcount(swp_entry_t entry)
1471{
1472	int count = 0;
1473	struct swap_info_struct *si;
1474
1475	si = get_swap_device(entry);
1476	if (si) {
1477		count = swap_swapcount(si, entry);
1478		put_swap_device(si);
1479	}
1480	return count;
1481}
1482
1483/*
1484 * How many references to @entry are currently swapped out?
1485 * This considers COUNT_CONTINUED so it returns exact answer.
1486 */
1487int swp_swapcount(swp_entry_t entry)
1488{
1489	int count, tmp_count, n;
1490	struct swap_info_struct *p;
1491	struct swap_cluster_info *ci;
1492	struct page *page;
1493	pgoff_t offset;
1494	unsigned char *map;
1495
1496	p = _swap_info_get(entry);
1497	if (!p)
1498		return 0;
1499
1500	offset = swp_offset(entry);
1501
1502	ci = lock_cluster_or_swap_info(p, offset);
1503
1504	count = swap_count(p->swap_map[offset]);
1505	if (!(count & COUNT_CONTINUED))
1506		goto out;
1507
1508	count &= ~COUNT_CONTINUED;
1509	n = SWAP_MAP_MAX + 1;
1510
1511	page = vmalloc_to_page(p->swap_map + offset);
1512	offset &= ~PAGE_MASK;
1513	VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1514
1515	do {
1516		page = list_next_entry(page, lru);
1517		map = kmap_atomic(page);
1518		tmp_count = map[offset];
1519		kunmap_atomic(map);
1520
1521		count += (tmp_count & ~COUNT_CONTINUED) * n;
1522		n *= (SWAP_CONT_MAX + 1);
1523	} while (tmp_count & COUNT_CONTINUED);
1524out:
1525	unlock_cluster_or_swap_info(p, ci);
1526	return count;
1527}
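
/*
 * Worked example of the decode loop above, assuming the usual constants
 * (SWAP_MAP_MAX == 0x3e, SWAP_CONT_MAX == 0x7f): if swap_map[offset] is
 * (37 | COUNT_CONTINUED) and the byte at the same offset in the first
 * continuation page is 2 with no COUNT_CONTINUED set, the result is
 * 37 + 2 * (SWAP_MAP_MAX + 1) = 37 + 2 * 63 = 163 references.
 */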
1528
1529static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1530					 swp_entry_t entry)
1531{
1532	struct swap_cluster_info *ci;
1533	unsigned char *map = si->swap_map;
1534	unsigned long roffset = swp_offset(entry);
1535	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1536	int i;
1537	bool ret = false;
1538
1539	ci = lock_cluster_or_swap_info(si, offset);
1540	if (!ci || !cluster_is_huge(ci)) {
1541		if (swap_count(map[roffset]))
1542			ret = true;
1543		goto unlock_out;
1544	}
1545	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1546		if (swap_count(map[offset + i])) {
1547			ret = true;
1548			break;
1549		}
1550	}
1551unlock_out:
1552	unlock_cluster_or_swap_info(si, ci);
1553	return ret;
1554}
1555
1556static bool folio_swapped(struct folio *folio)
1557{
1558	swp_entry_t entry = folio_swap_entry(folio);
1559	struct swap_info_struct *si = _swap_info_get(entry);
1560
1561	if (!si)
1562		return false;
1563
1564	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1565		return swap_swapcount(si, entry) != 0;
1566
1567	return swap_page_trans_huge_swapped(si, entry);
1568}
1569
1570/**
1571 * folio_free_swap() - Free the swap space used for this folio.
1572 * @folio: The folio to remove.
1573 *
1574 * If swap is getting full, or if there are no more mappings of this folio,
1575 * then call folio_free_swap to free its swap space.
1576 *
1577 * Return: true if we were able to release the swap space.
1578 */
1579bool folio_free_swap(struct folio *folio)
1580{
1581	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1582
1583	if (!folio_test_swapcache(folio))
1584		return false;
1585	if (folio_test_writeback(folio))
1586		return false;
1587	if (folio_swapped(folio))
1588		return false;
1589
1590	/*
1591	 * Once hibernation has begun to create its image of memory,
1592	 * there's a danger that one of the calls to folio_free_swap()
1593	 * - most probably a call from __try_to_reclaim_swap() while
1594	 * hibernation is allocating its own swap pages for the image,
1595	 * but conceivably even a call from memory reclaim - will free
1596	 * the swap from a folio which has already been recorded in the
1597	 * image as a clean swapcache folio, and then reuse its swap for
1598	 * another page of the image.  On waking from hibernation, the
1599	 * original folio might be freed under memory pressure, then
1600	 * later read back in from swap, now with the wrong data.
1601	 *
1602	 * Hibernation suspends storage while it is writing the image
1603	 * to disk so check that here.
1604	 */
1605	if (pm_suspended_storage())
1606		return false;
1607
1608	delete_from_swap_cache(folio);
1609	folio_set_dirty(folio);
1610	return true;
1611}
1612
1613/*
1614 * Free the swap entry like above, but also try to
1615 * free the page cache entry if it is the last user.
1616 */
1617int free_swap_and_cache(swp_entry_t entry)
1618{
1619	struct swap_info_struct *p;
1620	unsigned char count;
1621
1622	if (non_swap_entry(entry))
1623		return 1;
1624
1625	p = _swap_info_get(entry);
1626	if (p) {
1627		count = __swap_entry_free(p, entry);
1628		if (count == SWAP_HAS_CACHE &&
1629		    !swap_page_trans_huge_swapped(p, entry))
1630			__try_to_reclaim_swap(p, swp_offset(entry),
1631					      TTRS_UNMAPPED | TTRS_FULL);
1632	}
1633	return p != NULL;
1634}
1635
1636#ifdef CONFIG_HIBERNATION
1637
1638swp_entry_t get_swap_page_of_type(int type)
1639{
1640	struct swap_info_struct *si = swap_type_to_swap_info(type);
1641	swp_entry_t entry = {0};
1642
1643	if (!si)
1644		goto fail;
1645
1646	/* This is called for allocating swap entry, not cache */
1647	spin_lock(&si->lock);
1648	if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1649		atomic_long_dec(&nr_swap_pages);
1650	spin_unlock(&si->lock);
1651fail:
1652	return entry;
1653}
1654
1655/*
1656 * Find the swap type that corresponds to given device (if any).
1657 *
1658 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1659 * from 0, in which the swap header is expected to be located.
1660 *
1661 * This is needed for the suspend to disk (aka swsusp).
1662 */
1663int swap_type_of(dev_t device, sector_t offset)
1664{
1665	int type;
1666
1667	if (!device)
1668		return -1;
1669
1670	spin_lock(&swap_lock);
1671	for (type = 0; type < nr_swapfiles; type++) {
1672		struct swap_info_struct *sis = swap_info[type];
1673
1674		if (!(sis->flags & SWP_WRITEOK))
1675			continue;
1676
1677		if (device == sis->bdev->bd_dev) {
1678			struct swap_extent *se = first_se(sis);
1679
1680			if (se->start_block == offset) {
1681				spin_unlock(&swap_lock);
1682				return type;
1683			}
1684		}
1685	}
1686	spin_unlock(&swap_lock);
1687	return -ENODEV;
1688}
1689
1690int find_first_swap(dev_t *device)
1691{
1692	int type;
1693
1694	spin_lock(&swap_lock);
1695	for (type = 0; type < nr_swapfiles; type++) {
1696		struct swap_info_struct *sis = swap_info[type];
1697
1698		if (!(sis->flags & SWP_WRITEOK))
1699			continue;
1700		*device = sis->bdev->bd_dev;
1701		spin_unlock(&swap_lock);
1702		return type;
1703	}
1704	spin_unlock(&swap_lock);
1705	return -ENODEV;
1706}
1707
1708/*
1709 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1710 * corresponding to given index in swap_info (swap type).
1711 */
1712sector_t swapdev_block(int type, pgoff_t offset)
1713{
1714	struct swap_info_struct *si = swap_type_to_swap_info(type);
1715	struct swap_extent *se;
1716
1717	if (!si || !(si->flags & SWP_WRITEOK))
1718		return 0;
1719	se = offset_to_swap_extent(si, offset);
1720	return se->start_block + (offset - se->start_page);
1721}
1722
1723/*
1724 * Return either the total number of swap pages of given type, or the number
1725 * of free pages of that type (depending on @free)
1726 *
1727 * This is needed for software suspend
1728 */
1729unsigned int count_swap_pages(int type, int free)
1730{
1731	unsigned int n = 0;
1732
1733	spin_lock(&swap_lock);
1734	if ((unsigned int)type < nr_swapfiles) {
1735		struct swap_info_struct *sis = swap_info[type];
1736
1737		spin_lock(&sis->lock);
1738		if (sis->flags & SWP_WRITEOK) {
1739			n = sis->pages;
1740			if (free)
1741				n -= sis->inuse_pages;
1742		}
1743		spin_unlock(&sis->lock);
1744	}
1745	spin_unlock(&swap_lock);
1746	return n;
1747}
1748#endif /* CONFIG_HIBERNATION */
1749
1750static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1751{
1752	return pte_same(pte_swp_clear_flags(pte), swp_pte);
1753}
1754
1755/*
1756 * No need to decide whether this PTE shares the swap entry with others,
1757 * just let do_wp_page work it out if a write is requested later - to
1758 * force COW, vm_page_prot omits write permission from any private vma.
1759 */
1760static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1761		unsigned long addr, swp_entry_t entry, struct folio *folio)
1762{
1763	struct page *page = folio_file_page(folio, swp_offset(entry));
1764	struct page *swapcache;
1765	spinlock_t *ptl;
1766	pte_t *pte, new_pte;
1767	bool hwpoisoned = false;
1768	int ret = 1;
1769
1770	swapcache = page;
1771	page = ksm_might_need_to_copy(page, vma, addr);
1772	if (unlikely(!page))
1773		return -ENOMEM;
1774	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
1775		hwpoisoned = true;
1776
1777	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1778	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1779		ret = 0;
1780		goto out;
1781	}
1782
1783	if (unlikely(hwpoisoned || !PageUptodate(page))) {
1784		swp_entry_t swp_entry;
1785
1786		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1787		if (hwpoisoned) {
1788			swp_entry = make_hwpoison_entry(swapcache);
1789			page = swapcache;
1790		} else {
1791			swp_entry = make_swapin_error_entry();
1792		}
1793		new_pte = swp_entry_to_pte(swp_entry);
1794		ret = 0;
1795		goto setpte;
1796	}
1797
1798	/* See do_swap_page() */
1799	BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
1800	BUG_ON(PageAnon(page) && PageAnonExclusive(page));
1801
1802	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1803	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1804	get_page(page);
1805	if (page == swapcache) {
1806		rmap_t rmap_flags = RMAP_NONE;
1807
1808		/*
1809		 * See do_swap_page(): PageWriteback() would be problematic.
1810		 * However, we do a wait_on_page_writeback() just before this
1811		 * call and have the page locked.
1812		 */
1813		VM_BUG_ON_PAGE(PageWriteback(page), page);
1814		if (pte_swp_exclusive(*pte))
1815			rmap_flags |= RMAP_EXCLUSIVE;
1816
1817		page_add_anon_rmap(page, vma, addr, rmap_flags);
1818	} else { /* ksm created a completely new copy */
1819		page_add_new_anon_rmap(page, vma, addr);
1820		lru_cache_add_inactive_or_unevictable(page, vma);
1821	}
1822	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
1823	if (pte_swp_soft_dirty(*pte))
1824		new_pte = pte_mksoft_dirty(new_pte);
1825	if (pte_swp_uffd_wp(*pte))
1826		new_pte = pte_mkuffd_wp(new_pte);
1827setpte:
1828	set_pte_at(vma->vm_mm, addr, pte, new_pte);
1829	swap_free(entry);
1830out:
1831	pte_unmap_unlock(pte, ptl);
1832	if (page != swapcache) {
1833		unlock_page(page);
1834		put_page(page);
1835	}
1836	return ret;
1837}
1838
1839static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1840			unsigned long addr, unsigned long end,
1841			unsigned int type)
1842{
1843	swp_entry_t entry;
1844	pte_t *pte;
1845	struct swap_info_struct *si;
1846	int ret = 0;
1847	volatile unsigned char *swap_map;
1848
1849	si = swap_info[type];
1850	pte = pte_offset_map(pmd, addr);
1851	do {
1852		struct folio *folio;
1853		unsigned long offset;
1854
1855		if (!is_swap_pte(*pte))
1856			continue;
1857
1858		entry = pte_to_swp_entry(*pte);
1859		if (swp_type(entry) != type)
1860			continue;
1861
1862		offset = swp_offset(entry);
1863		pte_unmap(pte);
1864		swap_map = &si->swap_map[offset];
1865		folio = swap_cache_get_folio(entry, vma, addr);
1866		if (!folio) {
1867			struct page *page;
1868			struct vm_fault vmf = {
1869				.vma = vma,
1870				.address = addr,
1871				.real_address = addr,
1872				.pmd = pmd,
1873			};
1874
1875			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1876						&vmf);
1877			if (page)
1878				folio = page_folio(page);
1879		}
1880		if (!folio) {
1881			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1882				goto try_next;
1883			return -ENOMEM;
1884		}
1885
1886		folio_lock(folio);
1887		folio_wait_writeback(folio);
1888		ret = unuse_pte(vma, pmd, addr, entry, folio);
1889		if (ret < 0) {
1890			folio_unlock(folio);
1891			folio_put(folio);
1892			goto out;
1893		}
1894
1895		folio_free_swap(folio);
1896		folio_unlock(folio);
1897		folio_put(folio);
1898try_next:
1899		pte = pte_offset_map(pmd, addr);
1900	} while (pte++, addr += PAGE_SIZE, addr != end);
1901	pte_unmap(pte - 1);
1902
1903	ret = 0;
1904out:
1905	return ret;
1906}
1907
1908static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1909				unsigned long addr, unsigned long end,
1910				unsigned int type)
1911{
1912	pmd_t *pmd;
1913	unsigned long next;
1914	int ret;
1915
1916	pmd = pmd_offset(pud, addr);
1917	do {
1918		cond_resched();
1919		next = pmd_addr_end(addr, end);
1920		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1921			continue;
1922		ret = unuse_pte_range(vma, pmd, addr, next, type);
1923		if (ret)
1924			return ret;
1925	} while (pmd++, addr = next, addr != end);
1926	return 0;
1927}
1928
1929static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
1930				unsigned long addr, unsigned long end,
1931				unsigned int type)
1932{
1933	pud_t *pud;
1934	unsigned long next;
1935	int ret;
1936
1937	pud = pud_offset(p4d, addr);
1938	do {
1939		next = pud_addr_end(addr, end);
1940		if (pud_none_or_clear_bad(pud))
1941			continue;
1942		ret = unuse_pmd_range(vma, pud, addr, next, type);
1943		if (ret)
1944			return ret;
1945	} while (pud++, addr = next, addr != end);
1946	return 0;
1947}
1948
1949static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
1950				unsigned long addr, unsigned long end,
1951				unsigned int type)
1952{
1953	p4d_t *p4d;
1954	unsigned long next;
1955	int ret;
1956
1957	p4d = p4d_offset(pgd, addr);
1958	do {
1959		next = p4d_addr_end(addr, end);
1960		if (p4d_none_or_clear_bad(p4d))
1961			continue;
1962		ret = unuse_pud_range(vma, p4d, addr, next, type);
1963		if (ret)
1964			return ret;
1965	} while (p4d++, addr = next, addr != end);
1966	return 0;
1967}
1968
1969static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
1970{
1971	pgd_t *pgd;
1972	unsigned long addr, end, next;
1973	int ret;
1974
1975	addr = vma->vm_start;
1976	end = vma->vm_end;
1977
1978	pgd = pgd_offset(vma->vm_mm, addr);
1979	do {
1980		next = pgd_addr_end(addr, end);
1981		if (pgd_none_or_clear_bad(pgd))
1982			continue;
1983		ret = unuse_p4d_range(vma, pgd, addr, next, type);
1984		if (ret)
1985			return ret;
1986	} while (pgd++, addr = next, addr != end);
1987	return 0;
1988}
1989
1990static int unuse_mm(struct mm_struct *mm, unsigned int type)
1991{
1992	struct vm_area_struct *vma;
1993	int ret = 0;
1994	VMA_ITERATOR(vmi, mm, 0);
1995
1996	mmap_read_lock(mm);
1997	for_each_vma(vmi, vma) {
1998		if (vma->anon_vma) {
1999			ret = unuse_vma(vma, type);
2000			if (ret)
2001				break;
2002		}
2003
2004		cond_resched();
2005	}
2006	mmap_read_unlock(mm);
2007	return ret;
2008}
2009
2010/*
2011 * Scan swap_map from current position to next entry still in use.
2012 * Return 0 if there are no in-use entries after prev, up to the end
2013 * of the map.
2014 */
2015static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2016					unsigned int prev)
2017{
2018	unsigned int i;
2019	unsigned char count;
2020
2021	/*
2022	 * No need for swap_lock here: we're just looking
2023	 * for whether an entry is in use, not modifying it; false
2024	 * hits are okay, and sys_swapoff() has already prevented new
2025	 * allocations from this area (while holding swap_lock).
2026	 */
2027	for (i = prev + 1; i < si->max; i++) {
2028		count = READ_ONCE(si->swap_map[i]);
2029		if (count && swap_count(count) != SWAP_MAP_BAD)
2030			break;
2031		if ((i % LATENCY_LIMIT) == 0)
2032			cond_resched();
2033	}
2034
2035	if (i == si->max)
2036		i = 0;
2037
2038	return i;
2039}
2040
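/*
 * try_to_unuse() drives swapoff: first ask shmem/tmpfs to bring its
 * entries of this swap type back in, then walk every mm on
 * init_mm.mmlist and fault the remaining entries back in via
 * unuse_mm(), and finally drop folios of this type that are left only
 * in the swap cache.  Repeat until si->inuse_pages reaches zero or the
 * caller is signalled.
 */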
2041static int try_to_unuse(unsigned int type)
2042{
2043	struct mm_struct *prev_mm;
2044	struct mm_struct *mm;
2045	struct list_head *p;
2046	int retval = 0;
2047	struct swap_info_struct *si = swap_info[type];
2048	struct folio *folio;
2049	swp_entry_t entry;
2050	unsigned int i;
2051
2052	if (!READ_ONCE(si->inuse_pages))
2053		return 0;
2054
2055retry:
2056	retval = shmem_unuse(type);
2057	if (retval)
2058		return retval;
2059
2060	prev_mm = &init_mm;
2061	mmget(prev_mm);
2062
2063	spin_lock(&mmlist_lock);
2064	p = &init_mm.mmlist;
2065	while (READ_ONCE(si->inuse_pages) &&
2066	       !signal_pending(current) &&
2067	       (p = p->next) != &init_mm.mmlist) {
2068
2069		mm = list_entry(p, struct mm_struct, mmlist);
2070		if (!mmget_not_zero(mm))
2071			continue;
2072		spin_unlock(&mmlist_lock);
2073		mmput(prev_mm);
2074		prev_mm = mm;
2075		retval = unuse_mm(mm, type);
2076		if (retval) {
2077			mmput(prev_mm);
2078			return retval;
2079		}
2080
2081		/*
2082		 * Make sure that we aren't completely killing
2083		 * interactive performance.
2084		 */
2085		cond_resched();
2086		spin_lock(&mmlist_lock);
2087	}
2088	spin_unlock(&mmlist_lock);
2089
2090	mmput(prev_mm);
2091
2092	i = 0;
2093	while (READ_ONCE(si->inuse_pages) &&
2094	       !signal_pending(current) &&
2095	       (i = find_next_to_unuse(si, i)) != 0) {
2096
2097		entry = swp_entry(type, i);
2098		folio = filemap_get_folio(swap_address_space(entry), i);
2099		if (!folio)
2100			continue;
2101
2102		/*
2103		 * It is conceivable that a racing task removed this folio from
2104		 * swap cache just before we acquired the page lock. The folio
2105		 * might even be back in swap cache on another swap area. But
2106		 * that is okay, folio_free_swap() only removes stale folios.
2107		 */
2108		folio_lock(folio);
2109		folio_wait_writeback(folio);
2110		folio_free_swap(folio);
2111		folio_unlock(folio);
2112		folio_put(folio);
2113	}
2114
2115	/*
2116	 * Let's check again to see if there are still swap entries in the map.
2117	 * If yes, we need to retry the unuse logic.
2118	 * Under global memory pressure, swap entries can be reinserted back
2119	 * into process space after the mmlist loop above passes over them.
2120	 *
2121	 * Limit the number of retries? No: when mmget_not_zero()
2122	 * above fails, that mm is likely to be freeing swap from
2123	 * exit_mmap(), which proceeds at its own independent pace;
2124	 * and even shmem_writepage() could have been preempted after
2125	 * folio_alloc_swap(), temporarily hiding that swap.  It's easy
2126	 * and robust (though cpu-intensive) just to keep retrying.
2127	 */
2128	if (READ_ONCE(si->inuse_pages)) {
2129		if (!signal_pending(current))
2130			goto retry;
2131		return -EINTR;
2132	}
2133
2134	return 0;
2135}
2136
2137/*
2138 * After a successful try_to_unuse, if no swap is now in use, we know
2139 * we can empty the mmlist.  swap_lock must be held on entry and exit.
2140 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2141 * added to the mmlist just after page_duplicate - before would be racy.
2142 */
2143static void drain_mmlist(void)
2144{
2145	struct list_head *p, *next;
2146	unsigned int type;
2147
2148	for (type = 0; type < nr_swapfiles; type++)
2149		if (swap_info[type]->inuse_pages)
2150			return;
2151	spin_lock(&mmlist_lock);
2152	list_for_each_safe(p, next, &init_mm.mmlist)
2153		list_del_init(p);
2154	spin_unlock(&mmlist_lock);
2155}
2156
2157/*
2158 * Free all of a swapdev's extent information
2159 */
2160static void destroy_swap_extents(struct swap_info_struct *sis)
2161{
2162	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2163		struct rb_node *rb = sis->swap_extent_root.rb_node;
2164		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2165
2166		rb_erase(rb, &sis->swap_extent_root);
2167		kfree(se);
2168	}
2169
2170	if (sis->flags & SWP_ACTIVATED) {
2171		struct file *swap_file = sis->swap_file;
2172		struct address_space *mapping = swap_file->f_mapping;
2173
2174		sis->flags &= ~SWP_ACTIVATED;
2175		if (mapping->a_ops->swap_deactivate)
2176			mapping->a_ops->swap_deactivate(swap_file);
2177	}
2178}
2179
2180/*
2181 * Add a block range (and the corresponding page range) into this swapdev's
2182 * extent tree.
2183 *
2184 * This function rather assumes that it is called in ascending page order.
2185 */
2186int
2187add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2188		unsigned long nr_pages, sector_t start_block)
2189{
2190	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2191	struct swap_extent *se;
2192	struct swap_extent *new_se;
2193
2194	/*
2195	 * place the new node at the rightmost position since the
2196	 * function is called in ascending page order.
2197	 */
2198	while (*link) {
2199		parent = *link;
2200		link = &parent->rb_right;
2201	}
2202
2203	if (parent) {
2204		se = rb_entry(parent, struct swap_extent, rb_node);
2205		BUG_ON(se->start_page + se->nr_pages != start_page);
2206		if (se->start_block + se->nr_pages == start_block) {
2207			/* Merge it */
2208			se->nr_pages += nr_pages;
2209			return 0;
2210		}
2211	}
2212
2213	/* No merge, insert a new extent. */
2214	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2215	if (new_se == NULL)
2216		return -ENOMEM;
2217	new_se->start_page = start_page;
2218	new_se->nr_pages = nr_pages;
2219	new_se->start_block = start_block;
2220
2221	rb_link_node(&new_se->rb_node, parent, link);
2222	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2223	return 1;
2224}
2225EXPORT_SYMBOL_GPL(add_swap_extent);
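/*
 * Illustrative example (hypothetical numbers): if the rightmost extent
 * currently covers pages 0-63 starting at disk block 1000, then
 * add_swap_extent(sis, 64, 32, 1064) is contiguous on disk and simply
 * extends that extent to nr_pages = 96, whereas the same call with
 * start_block = 5000 would insert a new 32-page extent instead.
 */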
2226
2227/*
2228 * A `swap extent' is a simple thing which maps a contiguous range of pages
2229 * onto a contiguous range of disk blocks.  A rbtree of swap extents is
2230 * built at swapon time and is then used at swap_writepage/swap_readpage
2231 * time for locating where on disk a page belongs.
2232 *
2233 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2234 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2235 * swap files identically.
2236 *
2237 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2238 * extent rbtree operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
2239 * swapfiles are handled *identically* after swapon time.
2240 *
2241 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2242 * and will parse them into a rbtree, in PAGE_SIZE chunks.  If some stray
2243 * blocks are found which do not fall within the PAGE_SIZE alignment
2244 * requirements, they are simply tossed out - we will never use those blocks
2245 * for swapping.
2246 *
2247 * For all swap devices we set S_SWAPFILE across the life of the swapon.  This
2248 * prevents users from writing to the swap device, which will corrupt memory.
2249 *
2250 * The amount of disk space which a single swap extent represents varies.
2251 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
2252 * extents in the rbtree. - akpm.
2253 */
2254static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2255{
2256	struct file *swap_file = sis->swap_file;
2257	struct address_space *mapping = swap_file->f_mapping;
2258	struct inode *inode = mapping->host;
2259	int ret;
2260
2261	if (S_ISBLK(inode->i_mode)) {
2262		ret = add_swap_extent(sis, 0, sis->max, 0);
2263		*span = sis->pages;
2264		return ret;
2265	}
2266
2267	if (mapping->a_ops->swap_activate) {
2268		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2269		if (ret < 0)
2270			return ret;
2271		sis->flags |= SWP_ACTIVATED;
2272		if ((sis->flags & SWP_FS_OPS) &&
2273		    sio_pool_init() != 0) {
2274			destroy_swap_extents(sis);
2275			return -ENOMEM;
2276		}
2277		return ret;
2278	}
2279
2280	return generic_swapfile_activate(sis, swap_file, span);
2281}
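/*
 * A minimal sketch (not part of this file) of how an extent is used at
 * I/O time: given the extent 'se' covering swap page 'offset', the disk
 * location is
 *
 *	sector_t sector = se->start_block + (offset - se->start_page);
 *
 * in units of PAGE_SIZE blocks; shift by (PAGE_SHIFT - 9) to get
 * 512-byte sectors.
 */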
2282
2283static int swap_node(struct swap_info_struct *p)
2284{
2285	struct block_device *bdev;
2286
2287	if (p->bdev)
2288		bdev = p->bdev;
2289	else
2290		bdev = p->swap_file->f_inode->i_sb->s_bdev;
2291
2292	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2293}
2294
2295static void setup_swap_info(struct swap_info_struct *p, int prio,
2296			    unsigned char *swap_map,
2297			    struct swap_cluster_info *cluster_info)
2298{
2299	int i;
2300
2301	if (prio >= 0)
2302		p->prio = prio;
2303	else
2304		p->prio = --least_priority;
2305	/*
2306	 * the plist prio is negated because plist ordering is
2307	 * low-to-high, while swap ordering is high-to-low
2308	 */
2309	p->list.prio = -p->prio;
2310	for_each_node(i) {
2311		if (p->prio >= 0)
2312			p->avail_lists[i].prio = -p->prio;
2313		else {
2314			if (swap_node(p) == i)
2315				p->avail_lists[i].prio = 1;
2316			else
2317				p->avail_lists[i].prio = -p->prio;
2318		}
2319	}
2320	p->swap_map = swap_map;
2321	p->cluster_info = cluster_info;
2322}
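/*
 * Worked example of the negation above: a device given prio 5 gets
 * list.prio = -5 on every list, so it sorts ahead of a prio 3 device
 * (-3) in the low-to-high plists.  A device with an auto-assigned
 * (negative) priority, say -2, instead gets prio 1 on the avail_list of
 * its own NUMA node, so allocations prefer node-local swap devices when
 * no explicit priority was requested.
 */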
2323
2324static void _enable_swap_info(struct swap_info_struct *p)
2325{
2326	p->flags |= SWP_WRITEOK;
2327	atomic_long_add(p->pages, &nr_swap_pages);
2328	total_swap_pages += p->pages;
2329
2330	assert_spin_locked(&swap_lock);
2331	/*
2332	 * both lists are plists, and thus priority ordered.
2333	 * swap_active_head needs to be priority ordered for swapoff(),
2334	 * which on removal of any swap_info_struct with an auto-assigned
2335	 * (i.e. negative) priority increments the auto-assigned priority
2336	 * of any lower-priority swap_info_structs.
2337	 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2338	 * which allocates swap pages from the highest available priority
2339	 * swap_info_struct.
2340	 */
2341	plist_add(&p->list, &swap_active_head);
2342	add_to_avail_list(p);
2343}
2344
2345static void enable_swap_info(struct swap_info_struct *p, int prio,
2346				unsigned char *swap_map,
2347				struct swap_cluster_info *cluster_info,
2348				unsigned long *frontswap_map)
2349{
2350	if (IS_ENABLED(CONFIG_FRONTSWAP))
2351		frontswap_init(p->type, frontswap_map);
2352	spin_lock(&swap_lock);
2353	spin_lock(&p->lock);
2354	setup_swap_info(p, prio, swap_map, cluster_info);
2355	spin_unlock(&p->lock);
2356	spin_unlock(&swap_lock);
2357	/*
2358	 * Finished initializing swap device, now it's safe to reference it.
2359	 */
2360	percpu_ref_resurrect(&p->users);
2361	spin_lock(&swap_lock);
2362	spin_lock(&p->lock);
2363	_enable_swap_info(p);
2364	spin_unlock(&p->lock);
2365	spin_unlock(&swap_lock);
2366}
2367
2368static void reinsert_swap_info(struct swap_info_struct *p)
2369{
2370	spin_lock(&swap_lock);
2371	spin_lock(&p->lock);
2372	setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2373	_enable_swap_info(p);
2374	spin_unlock(&p->lock);
2375	spin_unlock(&swap_lock);
2376}
2377
2378bool has_usable_swap(void)
2379{
2380	bool ret = true;
2381
2382	spin_lock(&swap_lock);
2383	if (plist_head_empty(&swap_active_head))
2384		ret = false;
2385	spin_unlock(&swap_lock);
2386	return ret;
2387}
2388
2389SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2390{
2391	struct swap_info_struct *p = NULL;
2392	unsigned char *swap_map;
2393	struct swap_cluster_info *cluster_info;
2394	unsigned long *frontswap_map;
2395	struct file *swap_file, *victim;
2396	struct address_space *mapping;
2397	struct inode *inode;
2398	struct filename *pathname;
2399	int err, found = 0;
2400	unsigned int old_block_size;
2401
2402	if (!capable(CAP_SYS_ADMIN))
2403		return -EPERM;
2404
2405	BUG_ON(!current->mm);
2406
2407	pathname = getname(specialfile);
2408	if (IS_ERR(pathname))
2409		return PTR_ERR(pathname);
2410
2411	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2412	err = PTR_ERR(victim);
2413	if (IS_ERR(victim))
2414		goto out;
2415
2416	mapping = victim->f_mapping;
2417	spin_lock(&swap_lock);
2418	plist_for_each_entry(p, &swap_active_head, list) {
2419		if (p->flags & SWP_WRITEOK) {
2420			if (p->swap_file->f_mapping == mapping) {
2421				found = 1;
2422				break;
2423			}
2424		}
2425	}
2426	if (!found) {
2427		err = -EINVAL;
2428		spin_unlock(&swap_lock);
2429		goto out_dput;
2430	}
2431	if (!security_vm_enough_memory_mm(current->mm, p->pages))
2432		vm_unacct_memory(p->pages);
2433	else {
2434		err = -ENOMEM;
2435		spin_unlock(&swap_lock);
2436		goto out_dput;
2437	}
2438	del_from_avail_list(p);
2439	spin_lock(&p->lock);
2440	if (p->prio < 0) {
2441		struct swap_info_struct *si = p;
2442		int nid;
2443
2444		plist_for_each_entry_continue(si, &swap_active_head, list) {
2445			si->prio++;
2446			si->list.prio--;
2447			for_each_node(nid) {
2448				if (si->avail_lists[nid].prio != 1)
2449					si->avail_lists[nid].prio--;
2450			}
2451		}
2452		least_priority++;
2453	}
2454	plist_del(&p->list, &swap_active_head);
2455	atomic_long_sub(p->pages, &nr_swap_pages);
2456	total_swap_pages -= p->pages;
2457	p->flags &= ~SWP_WRITEOK;
2458	spin_unlock(&p->lock);
2459	spin_unlock(&swap_lock);
2460
2461	disable_swap_slots_cache_lock();
2462
2463	set_current_oom_origin();
2464	err = try_to_unuse(p->type);
2465	clear_current_oom_origin();
2466
2467	if (err) {
2468		/* re-insert swap space back into swap_list */
2469		reinsert_swap_info(p);
2470		reenable_swap_slots_cache_unlock();
2471		goto out_dput;
2472	}
2473
2474	reenable_swap_slots_cache_unlock();
2475
2476	/*
2477	 * Wait for swap operations protected by get/put_swap_device()
2478	 * to complete.
2479	 *
2480	 * We need synchronize_rcu() here to protect access to the
2481	 * swap cache data structure.
2482	 */
2483	percpu_ref_kill(&p->users);
2484	synchronize_rcu();
2485	wait_for_completion(&p->comp);
2486
2487	flush_work(&p->discard_work);
2488
2489	destroy_swap_extents(p);
2490	if (p->flags & SWP_CONTINUED)
2491		free_swap_count_continuations(p);
2492
2493	if (!p->bdev || !bdev_nonrot(p->bdev))
2494		atomic_dec(&nr_rotate_swap);
2495
2496	mutex_lock(&swapon_mutex);
2497	spin_lock(&swap_lock);
2498	spin_lock(&p->lock);
2499	drain_mmlist();
2500
2501	/* wait for anyone still in scan_swap_map_slots */
2502	p->highest_bit = 0;		/* cuts scans short */
2503	while (p->flags >= SWP_SCANNING) {
2504		spin_unlock(&p->lock);
2505		spin_unlock(&swap_lock);
2506		schedule_timeout_uninterruptible(1);
2507		spin_lock(&swap_lock);
2508		spin_lock(&p->lock);
2509	}
2510
2511	swap_file = p->swap_file;
2512	old_block_size = p->old_block_size;
2513	p->swap_file = NULL;
2514	p->max = 0;
2515	swap_map = p->swap_map;
2516	p->swap_map = NULL;
2517	cluster_info = p->cluster_info;
2518	p->cluster_info = NULL;
2519	frontswap_map = frontswap_map_get(p);
2520	spin_unlock(&p->lock);
2521	spin_unlock(&swap_lock);
2522	arch_swap_invalidate_area(p->type);
2523	frontswap_invalidate_area(p->type);
2524	frontswap_map_set(p, NULL);
2525	mutex_unlock(&swapon_mutex);
2526	free_percpu(p->percpu_cluster);
2527	p->percpu_cluster = NULL;
2528	free_percpu(p->cluster_next_cpu);
2529	p->cluster_next_cpu = NULL;
2530	vfree(swap_map);
2531	kvfree(cluster_info);
2532	kvfree(frontswap_map);
2533	/* Destroy swap account information */
2534	swap_cgroup_swapoff(p->type);
2535	exit_swap_address_space(p->type);
2536
2537	inode = mapping->host;
2538	if (S_ISBLK(inode->i_mode)) {
2539		struct block_device *bdev = I_BDEV(inode);
2540
2541		set_blocksize(bdev, old_block_size);
2542		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2543	}
2544
2545	inode_lock(inode);
2546	inode->i_flags &= ~S_SWAPFILE;
2547	inode_unlock(inode);
2548	filp_close(swap_file, NULL);
2549
2550	/*
2551	 * Clear the SWP_USED flag after all resources are freed so that swapon
2552	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
2553	 * not hold p->lock after we cleared its SWP_WRITEOK.
2554	 */
2555	spin_lock(&swap_lock);
2556	p->flags = 0;
2557	spin_unlock(&swap_lock);
2558
2559	err = 0;
2560	atomic_inc(&proc_poll_event);
2561	wake_up_interruptible(&proc_poll_wait);
2562
2563out_dput:
2564	filp_close(victim, NULL);
2565out:
2566	putname(pathname);
2567	return err;
2568}
2569
2570#ifdef CONFIG_PROC_FS
2571static __poll_t swaps_poll(struct file *file, poll_table *wait)
2572{
2573	struct seq_file *seq = file->private_data;
2574
2575	poll_wait(file, &proc_poll_wait, wait);
2576
2577	if (seq->poll_event != atomic_read(&proc_poll_event)) {
2578		seq->poll_event = atomic_read(&proc_poll_event);
2579		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2580	}
2581
2582	return EPOLLIN | EPOLLRDNORM;
2583}
2584
2585/* iterator */
2586static void *swap_start(struct seq_file *swap, loff_t *pos)
2587{
2588	struct swap_info_struct *si;
2589	int type;
2590	loff_t l = *pos;
2591
2592	mutex_lock(&swapon_mutex);
2593
2594	if (!l)
2595		return SEQ_START_TOKEN;
2596
2597	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2598		if (!(si->flags & SWP_USED) || !si->swap_map)
2599			continue;
2600		if (!--l)
2601			return si;
2602	}
2603
2604	return NULL;
2605}
2606
2607static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2608{
2609	struct swap_info_struct *si = v;
2610	int type;
2611
2612	if (v == SEQ_START_TOKEN)
2613		type = 0;
2614	else
2615		type = si->type + 1;
2616
2617	++(*pos);
2618	for (; (si = swap_type_to_swap_info(type)); type++) {
2619		if (!(si->flags & SWP_USED) || !si->swap_map)
2620			continue;
2621		return si;
2622	}
2623
2624	return NULL;
2625}
2626
2627static void swap_stop(struct seq_file *swap, void *v)
2628{
2629	mutex_unlock(&swapon_mutex);
2630}
2631
2632static int swap_show(struct seq_file *swap, void *v)
2633{
2634	struct swap_info_struct *si = v;
2635	struct file *file;
2636	int len;
2637	unsigned long bytes, inuse;
2638
2639	if (si == SEQ_START_TOKEN) {
2640		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2641		return 0;
2642	}
2643
2644	bytes = si->pages << (PAGE_SHIFT - 10);
2645	inuse = READ_ONCE(si->inuse_pages) << (PAGE_SHIFT - 10);
2646
2647	file = si->swap_file;
2648	len = seq_file_path(swap, file, " \t\n\\");
2649	seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2650			len < 40 ? 40 - len : 1, " ",
2651			S_ISBLK(file_inode(file)->i_mode) ?
2652				"partition" : "file\t",
2653			bytes, bytes < 10000000 ? "\t" : "",
2654			inuse, inuse < 10000000 ? "\t" : "",
2655			si->prio);
2656	return 0;
2657}
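/*
 * Sample /proc/swaps output produced by the above (illustrative values,
 * column spacing condensed):
 *
 *	Filename      Type       Size     Used   Priority
 *	/dev/sda2     partition  8388604  1024   -2
 *	/swapfile     file       2097148  0      -3
 */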
2658
2659static const struct seq_operations swaps_op = {
2660	.start =	swap_start,
2661	.next =		swap_next,
2662	.stop =		swap_stop,
2663	.show =		swap_show
2664};
2665
2666static int swaps_open(struct inode *inode, struct file *file)
2667{
2668	struct seq_file *seq;
2669	int ret;
2670
2671	ret = seq_open(file, &swaps_op);
2672	if (ret)
2673		return ret;
2674
2675	seq = file->private_data;
2676	seq->poll_event = atomic_read(&proc_poll_event);
2677	return 0;
2678}
2679
2680static const struct proc_ops swaps_proc_ops = {
2681	.proc_flags	= PROC_ENTRY_PERMANENT,
2682	.proc_open	= swaps_open,
2683	.proc_read	= seq_read,
2684	.proc_lseek	= seq_lseek,
2685	.proc_release	= seq_release,
2686	.proc_poll	= swaps_poll,
2687};
2688
2689static int __init procswaps_init(void)
2690{
2691	proc_create("swaps", 0, NULL, &swaps_proc_ops);
2692	return 0;
2693}
2694__initcall(procswaps_init);
2695#endif /* CONFIG_PROC_FS */
2696
2697#ifdef MAX_SWAPFILES_CHECK
2698static int __init max_swapfiles_check(void)
2699{
2700	MAX_SWAPFILES_CHECK();
2701	return 0;
2702}
2703late_initcall(max_swapfiles_check);
2704#endif
2705
2706static struct swap_info_struct *alloc_swap_info(void)
2707{
2708	struct swap_info_struct *p;
2709	struct swap_info_struct *defer = NULL;
2710	unsigned int type;
2711	int i;
2712
2713	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2714	if (!p)
2715		return ERR_PTR(-ENOMEM);
2716
2717	if (percpu_ref_init(&p->users, swap_users_ref_free,
2718			    PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2719		kvfree(p);
2720		return ERR_PTR(-ENOMEM);
2721	}
2722
2723	spin_lock(&swap_lock);
2724	for (type = 0; type < nr_swapfiles; type++) {
2725		if (!(swap_info[type]->flags & SWP_USED))
2726			break;
2727	}
2728	if (type >= MAX_SWAPFILES) {
2729		spin_unlock(&swap_lock);
2730		percpu_ref_exit(&p->users);
2731		kvfree(p);
2732		return ERR_PTR(-EPERM);
2733	}
2734	if (type >= nr_swapfiles) {
2735		p->type = type;
2736		/*
2737		 * Publish the swap_info_struct after initializing it.
2738		 * Note that kvzalloc() above zeroes all its fields.
2739		 */
2740		smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2741		nr_swapfiles++;
2742	} else {
2743		defer = p;
2744		p = swap_info[type];
2745		/*
2746		 * Do not memset this entry: a racing procfs swap_next()
2747		 * would be relying on p->type to remain valid.
2748		 */
2749	}
2750	p->swap_extent_root = RB_ROOT;
2751	plist_node_init(&p->list, 0);
2752	for_each_node(i)
2753		plist_node_init(&p->avail_lists[i], 0);
2754	p->flags = SWP_USED;
2755	spin_unlock(&swap_lock);
2756	if (defer) {
2757		percpu_ref_exit(&defer->users);
2758		kvfree(defer);
2759	}
2760	spin_lock_init(&p->lock);
2761	spin_lock_init(&p->cont_lock);
2762	init_completion(&p->comp);
2763
2764	return p;
2765}
2766
2767static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2768{
2769	int error;
2770
2771	if (S_ISBLK(inode->i_mode)) {
2772		p->bdev = blkdev_get_by_dev(inode->i_rdev,
2773				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2774		if (IS_ERR(p->bdev)) {
2775			error = PTR_ERR(p->bdev);
2776			p->bdev = NULL;
2777			return error;
2778		}
2779		p->old_block_size = block_size(p->bdev);
2780		error = set_blocksize(p->bdev, PAGE_SIZE);
2781		if (error < 0)
2782			return error;
2783		/*
2784		 * Zoned block devices contain zones that have a sequential
2785		 * write only restriction.  Hence zoned block devices are not
2786		 * suitable for swapping.  Disallow them here.
2787		 */
2788		if (bdev_is_zoned(p->bdev))
2789			return -EINVAL;
2790		p->flags |= SWP_BLKDEV;
2791	} else if (S_ISREG(inode->i_mode)) {
2792		p->bdev = inode->i_sb->s_bdev;
2793	}
2794
2795	return 0;
2796}
2797
2798
2799/*
2800 * Find out how many pages are allowed for a single swap device. There
2801 * are two limiting factors:
2802 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2803 * 2) the number of bits in the swap pte, as defined by the different
2804 * architectures.
2805 *
2806 * In order to find the largest possible bit mask, a swap entry with
2807 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2808 * decoded to a swp_entry_t again, and finally the swap offset is
2809 * extracted.
2810 *
2811 * This will mask all the bits from the initial ~0UL mask that can't
2812 * be encoded in either the swp_entry_t or the architecture definition
2813 * of a swap pte.
2814 */
2815unsigned long generic_max_swapfile_size(void)
2816{
2817	return swp_offset(pte_to_swp_entry(
2818			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2819}
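/*
 * For example (hypothetical architecture), if the swap pte can encode a
 * 50-bit offset, the ~0UL offset above comes back as 2^50 - 1 and the
 * function returns 2^50, i.e. a limit of 2^50 swap pages per device.
 */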
2820
2821/* Can be overridden by an architecture for additional checks. */
2822__weak unsigned long arch_max_swapfile_size(void)
2823{
2824	return generic_max_swapfile_size();
2825}
2826
2827static unsigned long read_swap_header(struct swap_info_struct *p,
2828					union swap_header *swap_header,
2829					struct inode *inode)
2830{
2831	int i;
2832	unsigned long maxpages;
2833	unsigned long swapfilepages;
2834	unsigned long last_page;
2835
2836	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2837		pr_err("Unable to find swap-space signature\n");
2838		return 0;
2839	}
2840
2841	/* swap partition endianness hack... */
2842	if (swab32(swap_header->info.version) == 1) {
2843		swab32s(&swap_header->info.version);
2844		swab32s(&swap_header->info.last_page);
2845		swab32s(&swap_header->info.nr_badpages);
2846		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2847			return 0;
2848		for (i = 0; i < swap_header->info.nr_badpages; i++)
2849			swab32s(&swap_header->info.badpages[i]);
2850	}
2851	/* Check the swap header's sub-version */
2852	if (swap_header->info.version != 1) {
2853		pr_warn("Unable to handle swap header version %d\n",
2854			swap_header->info.version);
2855		return 0;
2856	}
2857
2858	p->lowest_bit  = 1;
2859	p->cluster_next = 1;
2860	p->cluster_nr = 0;
2861
2862	maxpages = swapfile_maximum_size;
2863	last_page = swap_header->info.last_page;
2864	if (!last_page) {
2865		pr_warn("Empty swap-file\n");
2866		return 0;
2867	}
2868	if (last_page > maxpages) {
2869		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2870			maxpages << (PAGE_SHIFT - 10),
2871			last_page << (PAGE_SHIFT - 10));
2872	}
2873	if (maxpages > last_page) {
2874		maxpages = last_page + 1;
2875		/* p->max is an unsigned int: don't overflow it */
2876		if ((unsigned int)maxpages == 0)
2877			maxpages = UINT_MAX;
2878	}
2879	p->highest_bit = maxpages - 1;
2880
2881	if (!maxpages)
2882		return 0;
2883	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2884	if (swapfilepages && maxpages > swapfilepages) {
2885		pr_warn("Swap area shorter than signature indicates\n");
2886		return 0;
2887	}
2888	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2889		return 0;
2890	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2891		return 0;
2892
2893	return maxpages;
2894}
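/*
 * For example (illustrative numbers), with 4 KiB pages a header whose
 * info.last_page is 262143 describes maxpages = 262144, i.e. a 1 GiB
 * swap area of which page 0 holds the header and is never used for
 * swapping.
 */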
2895
2896#define SWAP_CLUSTER_INFO_COLS						\
2897	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
2898#define SWAP_CLUSTER_SPACE_COLS						\
2899	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
2900#define SWAP_CLUSTER_COLS						\
2901	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
2902
2903static int setup_swap_map_and_extents(struct swap_info_struct *p,
2904					union swap_header *swap_header,
2905					unsigned char *swap_map,
2906					struct swap_cluster_info *cluster_info,
2907					unsigned long maxpages,
2908					sector_t *span)
2909{
2910	unsigned int j, k;
2911	unsigned int nr_good_pages;
2912	int nr_extents;
2913	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2914	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
2915	unsigned long i, idx;
2916
2917	nr_good_pages = maxpages - 1;	/* omit header page */
2918
2919	cluster_list_init(&p->free_clusters);
2920	cluster_list_init(&p->discard_clusters);
2921
2922	for (i = 0; i < swap_header->info.nr_badpages; i++) {
2923		unsigned int page_nr = swap_header->info.badpages[i];
2924		if (page_nr == 0 || page_nr > swap_header->info.last_page)
2925			return -EINVAL;
2926		if (page_nr < maxpages) {
2927			swap_map[page_nr] = SWAP_MAP_BAD;
2928			nr_good_pages--;
2929			/*
2930			 * Haven't marked the cluster free yet, no list
2931			 * operation involved
2932			 */
2933			inc_cluster_info_page(p, cluster_info, page_nr);
2934		}
2935	}
2936
2937	/* Haven't marked the cluster free yet, no list operation involved */
2938	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2939		inc_cluster_info_page(p, cluster_info, i);
2940
2941	if (nr_good_pages) {
2942		swap_map[0] = SWAP_MAP_BAD;
2943		/*
2944		 * Haven't marked the cluster free yet, no list
2945		 * operation involved
2946		 */
2947		inc_cluster_info_page(p, cluster_info, 0);
2948		p->max = maxpages;
2949		p->pages = nr_good_pages;
2950		nr_extents = setup_swap_extents(p, span);
2951		if (nr_extents < 0)
2952			return nr_extents;
2953		nr_good_pages = p->pages;
2954	}
2955	if (!nr_good_pages) {
2956		pr_warn("Empty swap-file\n");
2957		return -EINVAL;
2958	}
2959
2960	if (!cluster_info)
2961		return nr_extents;
2962
2963
2964	/*
2965	 * Reduce false cache line sharing between cluster_info entries
2966	 * and between clusters sharing the same swap address space.
2967	 */
2968	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
2969		j = (k + col) % SWAP_CLUSTER_COLS;
2970		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
2971			idx = i * SWAP_CLUSTER_COLS + j;
2972			if (idx >= nr_clusters)
2973				continue;
2974			if (cluster_count(&cluster_info[idx]))
2975				continue;
2976			cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
2977			cluster_list_add_tail(&p->free_clusters, cluster_info,
2978					      idx);
2979		}
2980	}
2981	return nr_extents;
2982}
2983
2984SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2985{
2986	struct swap_info_struct *p;
2987	struct filename *name;
2988	struct file *swap_file = NULL;
2989	struct address_space *mapping;
2990	struct dentry *dentry;
2991	int prio;
2992	int error;
2993	union swap_header *swap_header;
2994	int nr_extents;
2995	sector_t span;
2996	unsigned long maxpages;
2997	unsigned char *swap_map = NULL;
2998	struct swap_cluster_info *cluster_info = NULL;
2999	unsigned long *frontswap_map = NULL;
3000	struct page *page = NULL;
3001	struct inode *inode = NULL;
3002	bool inced_nr_rotate_swap = false;
3003
3004	if (swap_flags & ~SWAP_FLAGS_VALID)
3005		return -EINVAL;
3006
3007	if (!capable(CAP_SYS_ADMIN))
3008		return -EPERM;
3009
3010	if (!swap_avail_heads)
3011		return -ENOMEM;
3012
3013	p = alloc_swap_info();
3014	if (IS_ERR(p))
3015		return PTR_ERR(p);
3016
3017	INIT_WORK(&p->discard_work, swap_discard_work);
3018
3019	name = getname(specialfile);
3020	if (IS_ERR(name)) {
3021		error = PTR_ERR(name);
3022		name = NULL;
3023		goto bad_swap;
3024	}
3025	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3026	if (IS_ERR(swap_file)) {
3027		error = PTR_ERR(swap_file);
3028		swap_file = NULL;
3029		goto bad_swap;
3030	}
3031
3032	p->swap_file = swap_file;
3033	mapping = swap_file->f_mapping;
3034	dentry = swap_file->f_path.dentry;
3035	inode = mapping->host;
3036
3037	error = claim_swapfile(p, inode);
3038	if (unlikely(error))
3039		goto bad_swap;
3040
3041	inode_lock(inode);
3042	if (d_unlinked(dentry) || cant_mount(dentry)) {
3043		error = -ENOENT;
3044		goto bad_swap_unlock_inode;
3045	}
3046	if (IS_SWAPFILE(inode)) {
3047		error = -EBUSY;
3048		goto bad_swap_unlock_inode;
3049	}
3050
3051	/*
3052	 * Read the swap header.
3053	 */
3054	if (!mapping->a_ops->read_folio) {
3055		error = -EINVAL;
3056		goto bad_swap_unlock_inode;
3057	}
3058	page = read_mapping_page(mapping, 0, swap_file);
3059	if (IS_ERR(page)) {
3060		error = PTR_ERR(page);
3061		goto bad_swap_unlock_inode;
3062	}
3063	swap_header = kmap(page);
3064
3065	maxpages = read_swap_header(p, swap_header, inode);
3066	if (unlikely(!maxpages)) {
3067		error = -EINVAL;
3068		goto bad_swap_unlock_inode;
3069	}
3070
3071	/* OK, set up the swap map and apply the bad block list */
3072	swap_map = vzalloc(maxpages);
3073	if (!swap_map) {
3074		error = -ENOMEM;
3075		goto bad_swap_unlock_inode;
3076	}
3077
3078	if (p->bdev && bdev_stable_writes(p->bdev))
3079		p->flags |= SWP_STABLE_WRITES;
3080
3081	if (p->bdev && p->bdev->bd_disk->fops->rw_page)
3082		p->flags |= SWP_SYNCHRONOUS_IO;
3083
3084	if (p->bdev && bdev_nonrot(p->bdev)) {
3085		int cpu;
3086		unsigned long ci, nr_cluster;
3087
3088		p->flags |= SWP_SOLIDSTATE;
3089		p->cluster_next_cpu = alloc_percpu(unsigned int);
3090		if (!p->cluster_next_cpu) {
3091			error = -ENOMEM;
3092			goto bad_swap_unlock_inode;
3093		}
3094		/*
3095		 * select a random position to start with, to help with
3096		 * SSD wear leveling
3097		 */
3098		for_each_possible_cpu(cpu) {
3099			per_cpu(*p->cluster_next_cpu, cpu) =
3100				get_random_u32_inclusive(1, p->highest_bit);
3101		}
3102		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3103
3104		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3105					GFP_KERNEL);
3106		if (!cluster_info) {
3107			error = -ENOMEM;
3108			goto bad_swap_unlock_inode;
3109		}
3110
3111		for (ci = 0; ci < nr_cluster; ci++)
3112			spin_lock_init(&((cluster_info + ci)->lock));
3113
3114		p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3115		if (!p->percpu_cluster) {
3116			error = -ENOMEM;
3117			goto bad_swap_unlock_inode;
3118		}
3119		for_each_possible_cpu(cpu) {
3120			struct percpu_cluster *cluster;
3121			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3122			cluster_set_null(&cluster->index);
3123		}
3124	} else {
3125		atomic_inc(&nr_rotate_swap);
3126		inced_nr_rotate_swap = true;
3127	}
3128
3129	error = swap_cgroup_swapon(p->type, maxpages);
3130	if (error)
3131		goto bad_swap_unlock_inode;
3132
3133	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3134		cluster_info, maxpages, &span);
3135	if (unlikely(nr_extents < 0)) {
3136		error = nr_extents;
3137		goto bad_swap_unlock_inode;
3138	}
3139	/* frontswap enabled? set up bit-per-page map for frontswap */
3140	if (IS_ENABLED(CONFIG_FRONTSWAP))
3141		frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3142					 sizeof(long),
3143					 GFP_KERNEL);
3144
3145	if ((swap_flags & SWAP_FLAG_DISCARD) &&
3146	    p->bdev && bdev_max_discard_sectors(p->bdev)) {
3147		/*
3148		 * When discard is enabled for swap with no particular
3149		 * policy flagged, we set all swap discard flags here in
3150		 * order to sustain backward compatibility with older
3151		 * swapon(8) releases.
3152		 */
3153		p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3154			     SWP_PAGE_DISCARD);
3155
3156		/*
3157		 * By flagging sys_swapon, a sysadmin can tell us to
3158		 * either do single-time area discards only, or to just
3159		 * perform discards for released swap page-clusters.
3160		 * Now it's time to adjust the p->flags accordingly.
3161		 */
3162		if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3163			p->flags &= ~SWP_PAGE_DISCARD;
3164		else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3165			p->flags &= ~SWP_AREA_DISCARD;
3166
3167		/* issue a swapon-time discard if it's still required */
3168		if (p->flags & SWP_AREA_DISCARD) {
3169			int err = discard_swap(p);
3170			if (unlikely(err))
3171				pr_err("swapon: discard_swap(%p): %d\n",
3172					p, err);
3173		}
3174	}
3175
3176	error = init_swap_address_space(p->type, maxpages);
3177	if (error)
3178		goto bad_swap_unlock_inode;
3179
3180	/*
3181	 * Flush any pending IO and dirty mappings before we start using this
3182	 * swap device.
3183	 */
3184	inode->i_flags |= S_SWAPFILE;
3185	error = inode_drain_writes(inode);
3186	if (error) {
3187		inode->i_flags &= ~S_SWAPFILE;
3188		goto free_swap_address_space;
3189	}
3190
3191	mutex_lock(&swapon_mutex);
3192	prio = -1;
3193	if (swap_flags & SWAP_FLAG_PREFER)
3194		prio =
3195		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
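	/*
	 * For example (illustrative), "swapon -p 5 <device>" arrives here
	 * with SWAP_FLAG_PREFER | 5, so prio is set to 5 above; without
	 * SWAP_FLAG_PREFER, prio stays -1 and setup_swap_info() assigns
	 * the next auto-generated negative priority.
	 */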
3196	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3197
3198	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3199		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3200		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3201		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3202		(p->flags & SWP_DISCARDABLE) ? "D" : "",
3203		(p->flags & SWP_AREA_DISCARD) ? "s" : "",
3204		(p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3205		(frontswap_map) ? "FS" : "");
3206
3207	mutex_unlock(&swapon_mutex);
3208	atomic_inc(&proc_poll_event);
3209	wake_up_interruptible(&proc_poll_wait);
3210
3211	error = 0;
3212	goto out;
3213free_swap_address_space:
3214	exit_swap_address_space(p->type);
3215bad_swap_unlock_inode:
3216	inode_unlock(inode);
3217bad_swap:
3218	free_percpu(p->percpu_cluster);
3219	p->percpu_cluster = NULL;
3220	free_percpu(p->cluster_next_cpu);
3221	p->cluster_next_cpu = NULL;
3222	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3223		set_blocksize(p->bdev, p->old_block_size);
3224		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3225	}
3226	inode = NULL;
3227	destroy_swap_extents(p);
3228	swap_cgroup_swapoff(p->type);
3229	spin_lock(&swap_lock);
3230	p->swap_file = NULL;
3231	p->flags = 0;
3232	spin_unlock(&swap_lock);
3233	vfree(swap_map);
3234	kvfree(cluster_info);
3235	kvfree(frontswap_map);
3236	if (inced_nr_rotate_swap)
3237		atomic_dec(&nr_rotate_swap);
3238	if (swap_file)
3239		filp_close(swap_file, NULL);
3240out:
3241	if (page && !IS_ERR(page)) {
3242		kunmap(page);
3243		put_page(page);
3244	}
3245	if (name)
3246		putname(name);
3247	if (inode)
3248		inode_unlock(inode);
3249	if (!error)
3250		enable_swap_slots_cache();
3251	return error;
3252}
3253
3254void si_swapinfo(struct sysinfo *val)
3255{
3256	unsigned int type;
3257	unsigned long nr_to_be_unused = 0;
3258
3259	spin_lock(&swap_lock);
3260	for (type = 0; type < nr_swapfiles; type++) {
3261		struct swap_info_struct *si = swap_info[type];
3262
3263		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3264			nr_to_be_unused += READ_ONCE(si->inuse_pages);
3265	}
3266	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3267	val->totalswap = total_swap_pages + nr_to_be_unused;
3268	spin_unlock(&swap_lock);
3269}
3270
3271/*
3272 * Verify that a swap entry is valid and increment its swap map count.
3273 *
3274 * Returns an error code in the following cases:
3275 * - success -> 0
3276 * - swp_entry is invalid -> EINVAL
3277 * - swp_entry is migration entry -> EINVAL
3278 * - swap-cache reference is requested but there is already one. -> EEXIST
3279 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3280 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3281 */
3282static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3283{
3284	struct swap_info_struct *p;
3285	struct swap_cluster_info *ci;
3286	unsigned long offset;
3287	unsigned char count;
3288	unsigned char has_cache;
3289	int err;
3290
3291	p = get_swap_device(entry);
3292	if (!p)
3293		return -EINVAL;
3294
3295	offset = swp_offset(entry);
3296	ci = lock_cluster_or_swap_info(p, offset);
3297
3298	count = p->swap_map[offset];
3299
3300	/*
3301	 * swapin_readahead() doesn't check if a swap entry is valid, so the
3302	 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3303	 */
3304	if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3305		err = -ENOENT;
3306		goto unlock_out;
3307	}
3308
3309	has_cache = count & SWAP_HAS_CACHE;
3310	count &= ~SWAP_HAS_CACHE;
3311	err = 0;
3312
3313	if (usage == SWAP_HAS_CACHE) {
3314
3315		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
3316		if (!has_cache && count)
3317			has_cache = SWAP_HAS_CACHE;
3318		else if (has_cache)		/* someone else added cache */
3319			err = -EEXIST;
3320		else				/* no users remaining */
3321			err = -ENOENT;
3322
3323	} else if (count || has_cache) {
3324
3325		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3326			count += usage;
3327		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3328			err = -EINVAL;
3329		else if (swap_count_continued(p, offset, count))
3330			count = COUNT_CONTINUED;
3331		else
3332			err = -ENOMEM;
3333	} else
3334		err = -ENOENT;			/* unused swap entry */
3335
3336	WRITE_ONCE(p->swap_map[offset], count | has_cache);
3337
3338unlock_out:
3339	unlock_cluster_or_swap_info(p, ci);
3340	put_swap_device(p);
3341	return err;
3342}
3343
3344/*
3345 * Help swapoff by noting that the swap entry belongs to shmem/tmpfs
3346 * (in which case its reference count is never incremented).
3347 */
3348void swap_shmem_alloc(swp_entry_t entry)
3349{
3350	__swap_duplicate(entry, SWAP_MAP_SHMEM);
3351}
3352
3353/*
3354 * Increase reference count of swap entry by 1.
3355 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3356 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
3357 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3358 * might occur if a page table entry has got corrupted.
3359 */
3360int swap_duplicate(swp_entry_t entry)
3361{
3362	int err = 0;
3363
3364	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3365		err = add_swap_count_continuation(entry, GFP_ATOMIC);
3366	return err;
3367}
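/*
 * A minimal sketch (hypothetical caller, not part of this file) of the
 * retry pattern described above, for code that duplicates a swap entry
 * while holding a page table lock:
 *
 *	if (swap_duplicate(entry) < 0) {
 *		pte_unmap_unlock(pte, ptl);
 *		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
 *			return -ENOMEM;
 *		goto again;
 *	}
 *
 * where "again" re-takes the page table lock and retries the duplicate.
 */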
3368
3369/*
3370 * @entry: swap entry for which we allocate swap cache.
3371 *
3372 * Called when allocating swap cache for an existing swap entry.
3373 * This can return error codes; returns 0 on success.
3374 * -EEXIST means there is a swap cache.
3375 * Note: return code is different from swap_duplicate().
3376 */
3377int swapcache_prepare(swp_entry_t entry)
3378{
3379	return __swap_duplicate(entry, SWAP_HAS_CACHE);
3380}
3381
3382struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3383{
3384	return swap_type_to_swap_info(swp_type(entry));
3385}
3386
3387struct swap_info_struct *page_swap_info(struct page *page)
3388{
3389	swp_entry_t entry = { .val = page_private(page) };
3390	return swp_swap_info(entry);
3391}
3392
3393/*
3394 * out-of-line methods to avoid include hell.
3395 */
3396struct address_space *swapcache_mapping(struct folio *folio)
3397{
3398	return page_swap_info(&folio->page)->swap_file->f_mapping;
3399}
3400EXPORT_SYMBOL_GPL(swapcache_mapping);
3401
3402pgoff_t __page_file_index(struct page *page)
3403{
3404	swp_entry_t swap = { .val = page_private(page) };
3405	return swp_offset(swap);
3406}
3407EXPORT_SYMBOL_GPL(__page_file_index);
3408
3409/*
3410 * add_swap_count_continuation - called when a swap count is duplicated
3411 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3412 * page of the original vmalloc'ed swap_map, to hold the continuation count
3413 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
3414 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3415 *
3416 * These continuation pages are seldom referenced: the common paths all work
3417 * on the original swap_map, only referring to a continuation page when the
3418 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3419 *
3420 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3421 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3422 * can be called after dropping locks.
3423 */
3424int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3425{
3426	struct swap_info_struct *si;
3427	struct swap_cluster_info *ci;
3428	struct page *head;
3429	struct page *page;
3430	struct page *list_page;
3431	pgoff_t offset;
3432	unsigned char count;
3433	int ret = 0;
3434
3435	/*
3436	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3437	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3438	 */
3439	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3440
3441	si = get_swap_device(entry);
3442	if (!si) {
3443		/*
3444		 * An acceptable race has occurred since the failing
3445		 * __swap_duplicate(): the swap device may have been swapped off
3446		 */
3447		goto outer;
3448	}
3449	spin_lock(&si->lock);
3450
3451	offset = swp_offset(entry);
3452
3453	ci = lock_cluster(si, offset);
3454
3455	count = swap_count(si->swap_map[offset]);
3456
3457	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3458		/*
3459		 * The higher the swap count, the more likely it is that tasks
3460		 * will race to add swap count continuation: we need to avoid
3461		 * over-provisioning.
3462		 */
3463		goto out;
3464	}
3465
3466	if (!page) {
3467		ret = -ENOMEM;
3468		goto out;
3469	}
3470
3471	/*
3472	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3473	 * no architecture is using highmem pages for kernel page tables: so it
3474	 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3475	 */
3476	head = vmalloc_to_page(si->swap_map + offset);
3477	offset &= ~PAGE_MASK;
3478
3479	spin_lock(&si->cont_lock);
3480	/*
3481	 * Page allocation does not initialize the page's lru field,
3482	 * but it does always reset its private field.
3483	 */
3484	if (!page_private(head)) {
3485		BUG_ON(count & COUNT_CONTINUED);
3486		INIT_LIST_HEAD(&head->lru);
3487		set_page_private(head, SWP_CONTINUED);
3488		si->flags |= SWP_CONTINUED;
3489	}
3490
3491	list_for_each_entry(list_page, &head->lru, lru) {
3492		unsigned char *map;
3493
3494		/*
3495		 * If the previous map said no continuation, but we've found
3496		 * a continuation page, free our allocation and use this one.
3497		 */
3498		if (!(count & COUNT_CONTINUED))
3499			goto out_unlock_cont;
3500
3501		map = kmap_atomic(list_page) + offset;
3502		count = *map;
3503		kunmap_atomic(map);
3504
3505		/*
3506		 * If this continuation count now has some space in it,
3507		 * free our allocation and use this one.
3508		 */
3509		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3510			goto out_unlock_cont;
3511	}
3512
3513	list_add_tail(&page->lru, &head->lru);
3514	page = NULL;			/* now it's attached, don't free it */
3515out_unlock_cont:
3516	spin_unlock(&si->cont_lock);
3517out:
3518	unlock_cluster(ci);
3519	spin_unlock(&si->lock);
3520	put_swap_device(si);
3521outer:
3522	if (page)
3523		__free_page(page);
3524	return ret;
3525}
3526
3527/*
3528 * swap_count_continued - when the original swap_map count is incremented
3529 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3530 * into, carry if so, or else fail until a new continuation page is allocated;
3531 * when the original swap_map count is decremented from 0 with continuation,
3532 * borrow from the continuation and report whether it still holds more.
3533 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3534 * lock.
3535 */
3536static bool swap_count_continued(struct swap_info_struct *si,
3537				 pgoff_t offset, unsigned char count)
3538{
3539	struct page *head;
3540	struct page *page;
3541	unsigned char *map;
3542	bool ret;
3543
3544	head = vmalloc_to_page(si->swap_map + offset);
3545	if (page_private(head) != SWP_CONTINUED) {
3546		BUG_ON(count & COUNT_CONTINUED);
3547		return false;		/* need to add count continuation */
3548	}
3549
3550	spin_lock(&si->cont_lock);
3551	offset &= ~PAGE_MASK;
3552	page = list_next_entry(head, lru);
3553	map = kmap_atomic(page) + offset;
3554
3555	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
3556		goto init_map;		/* jump over SWAP_CONT_MAX checks */
3557
3558	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3559		/*
3560		 * Think of how you add 1 to 999
3561		 */
3562		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3563			kunmap_atomic(map);
3564			page = list_next_entry(page, lru);
3565			BUG_ON(page == head);
3566			map = kmap_atomic(page) + offset;
3567		}
3568		if (*map == SWAP_CONT_MAX) {
3569			kunmap_atomic(map);
3570			page = list_next_entry(page, lru);
3571			if (page == head) {
3572				ret = false;	/* add count continuation */
3573				goto out;
3574			}
3575			map = kmap_atomic(page) + offset;
3576init_map:		*map = 0;		/* we didn't zero the page */
3577		}
3578		*map += 1;
3579		kunmap_atomic(map);
3580		while ((page = list_prev_entry(page, lru)) != head) {
3581			map = kmap_atomic(page) + offset;
3582			*map = COUNT_CONTINUED;
3583			kunmap_atomic(map);
3584		}
3585		ret = true;			/* incremented */
3586
3587	} else {				/* decrementing */
3588		/*
3589		 * Think of how you subtract 1 from 1000
3590		 */
3591		BUG_ON(count != COUNT_CONTINUED);
3592		while (*map == COUNT_CONTINUED) {
3593			kunmap_atomic(map);
3594			page = list_next_entry(page, lru);
3595			BUG_ON(page == head);
3596			map = kmap_atomic(page) + offset;
3597		}
3598		BUG_ON(*map == 0);
3599		*map -= 1;
3600		if (*map == 0)
3601			count = 0;
3602		kunmap_atomic(map);
3603		while ((page = list_prev_entry(page, lru)) != head) {
3604			map = kmap_atomic(page) + offset;
3605			*map = SWAP_CONT_MAX | count;
3606			count = COUNT_CONTINUED;
3607			kunmap_atomic(map);
3608		}
3609		ret = count == COUNT_CONTINUED;
3610	}
3611out:
3612	spin_unlock(&si->cont_lock);
3613	return ret;
3614}
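/*
 * Roughly, the scheme above stores a reference count as digits of a
 * mixed-radix number: swap_map[offset] holds the least significant
 * digit (at most SWAP_MAP_MAX), each continuation page holds one more
 * significant digit (at most SWAP_CONT_MAX), and COUNT_CONTINUED set in
 * a digit means a higher digit exists on the next page in the list.
 */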
3615
3616/*
3617 * free_swap_count_continuations - swapoff free all the continuation pages
3618 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3619 */
3620static void free_swap_count_continuations(struct swap_info_struct *si)
3621{
3622	pgoff_t offset;
3623
3624	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3625		struct page *head;
3626		head = vmalloc_to_page(si->swap_map + offset);
3627		if (page_private(head)) {
3628			struct page *page, *next;
3629
3630			list_for_each_entry_safe(page, next, &head->lru, lru) {
3631				list_del(&page->lru);
3632				__free_page(page);
3633			}
3634		}
3635	}
3636}
3637
3638#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3639void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3640{
3641	struct swap_info_struct *si, *next;
3642	int nid = page_to_nid(page);
3643
3644	if (!(gfp_mask & __GFP_IO))
3645		return;
3646
3647	if (!blk_cgroup_congested())
3648		return;
3649
3650	/*
3651	 * We've already scheduled a throttle, avoid taking the global swap
3652	 * lock.
3653	 */
3654	if (current->throttle_queue)
3655		return;
3656
3657	spin_lock(&swap_avail_lock);
3658	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3659				  avail_lists[nid]) {
3660		if (si->bdev) {
3661			blkcg_schedule_throttle(si->bdev->bd_disk, true);
3662			break;
3663		}
3664	}
3665	spin_unlock(&swap_avail_lock);
3666}
3667#endif
3668
3669static int __init swapfile_init(void)
3670{
3671	int nid;
3672
3673	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3674					 GFP_KERNEL);
3675	if (!swap_avail_heads) {
3676		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3677		return -ENOMEM;
3678	}
3679
3680	for_each_node(nid)
3681		plist_head_init(&swap_avail_heads[nid]);
3682
3683	swapfile_maximum_size = arch_max_swapfile_size();
3684
3685#ifdef CONFIG_MIGRATION
3686	if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
3687		swap_migration_ad_supported = true;
3688#endif	/* CONFIG_MIGRATION */
3689
3690	return 0;
3691}
3692subsys_initcall(swapfile_init);