v4.6
 
   1/*
   2 * zswap.c - zswap driver file
   3 *
   4 * zswap is a backend for frontswap that takes pages that are in the process
   5 * of being swapped out and attempts to compress and store them in a
   6 * RAM-based memory pool.  This can result in a significant I/O reduction on
   7 * the swap device and, in the case where decompressing from RAM is faster
   8 * than reading from the swap device, can also improve workload performance.
   9 *
  10 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or
  13 * modify it under the terms of the GNU General Public License
  14 * as published by the Free Software Foundation; either version 2
  15 * of the License, or (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21*/
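/*
 * Usage sketch (illustrative, based on the module parameters declared below):
 * with zswap built in, it can be enabled and tuned from the kernel command
 * line, e.g.
 *
 *   zswap.enabled=1 zswap.compressor=lzo zswap.zpool=zbud zswap.max_pool_percent=20
 *
 * where lzo, zbud and 20 are the defaults defined in this file.
 */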
  22
  23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24
  25#include <linux/module.h>
  26#include <linux/cpu.h>
  27#include <linux/highmem.h>
  28#include <linux/slab.h>
  29#include <linux/spinlock.h>
  30#include <linux/types.h>
  31#include <linux/atomic.h>
  32#include <linux/frontswap.h>
  33#include <linux/rbtree.h>
  34#include <linux/swap.h>
  35#include <linux/crypto.h>
 
 
  36#include <linux/mempool.h>
  37#include <linux/zpool.h>
  38
 
  39#include <linux/mm_types.h>
  40#include <linux/page-flags.h>
  41#include <linux/swapops.h>
  42#include <linux/writeback.h>
  43#include <linux/pagemap.h>
  44
  45/*********************************
  46* statistics
  47**********************************/
  48/* Total bytes used by the compressed storage */
  49static u64 zswap_pool_total_size;
  50/* The number of compressed pages currently stored in zswap */
  51static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 
 
  52
  53/*
  54 * The statistics below are not protected from concurrent access for
   55 * performance reasons, so they may not be 100% accurate.  However,
  56 * they do provide useful information on roughly how many times a
  57 * certain event is occurring.
  58*/
  59
  60/* Pool limit was hit (see zswap_max_pool_percent) */
  61static u64 zswap_pool_limit_hit;
  62/* Pages written back when pool limit was reached */
  63static u64 zswap_written_back_pages;
  64/* Store failed due to a reclaim failure after pool limit was reached */
  65static u64 zswap_reject_reclaim_fail;
 
 
  66/* Compressed page was too big for the allocator to (optimally) store */
  67static u64 zswap_reject_compress_poor;
  68/* Store failed because underlying allocator could not get memory */
  69static u64 zswap_reject_alloc_fail;
  70/* Store failed because the entry metadata could not be allocated (rare) */
  71static u64 zswap_reject_kmemcache_fail;
  72/* Duplicate store was encountered (rare) */
  73static u64 zswap_duplicate_entry;
 
 
 
  74
  75/*********************************
  76* tunables
  77**********************************/
  78
  79/* Enable/disable zswap (disabled by default) */
  80static bool zswap_enabled;
  81module_param_named(enabled, zswap_enabled, bool, 0644);
  82
  83/* Crypto compressor to use */
  84#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
  85static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
  86static int zswap_compressor_param_set(const char *,
  87				      const struct kernel_param *);
  88static struct kernel_param_ops zswap_compressor_param_ops = {
  89	.set =		zswap_compressor_param_set,
  90	.get =		param_get_charp,
  91	.free =		param_free_charp,
  92};
  93module_param_cb(compressor, &zswap_compressor_param_ops,
  94		&zswap_compressor, 0644);
  95
  96/* Compressed storage zpool to use */
  97#define ZSWAP_ZPOOL_DEFAULT "zbud"
  98static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
  99static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 100static struct kernel_param_ops zswap_zpool_param_ops = {
 101	.set =		zswap_zpool_param_set,
 102	.get =		param_get_charp,
 103	.free =		param_free_charp,
 104};
 105module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 106
 107/* The maximum percentage of memory that the compressed pool can occupy */
 108static unsigned int zswap_max_pool_percent = 20;
 109module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 110
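/*
 * Note on the tunables above: since they are registered with mode 0644 they
 * can also be changed at runtime through sysfs (paths assuming the standard
 * module-parameter layout), e.g.
 *
 *   echo lz4 > /sys/module/zswap/parameters/compressor
 *   echo 30  > /sys/module/zswap/parameters/max_pool_percent
 *
 * The compressor and zpool callbacks below take care of switching to a new
 * pool when such a write happens after initialization.
 */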
 111/*********************************
 112* data structures
 113**********************************/
 114
 115struct zswap_pool {
 116	struct zpool *zpool;
 117	struct crypto_comp * __percpu *tfm;
 118	struct kref kref;
 119	struct list_head list;
 120	struct rcu_head rcu_head;
 121	struct notifier_block notifier;
 122	char tfm_name[CRYPTO_MAX_ALG_NAME];
 123};
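/*
 * Note on the tfm member above: it is a per-cpu array of pointers to
 * crypto_comp transforms, one per CPU, allocated with alloc_percpu() in
 * zswap_pool_create() and filled in by the CPU notifier below.  A minimal
 * sketch of how the store/load paths use it:
 *
 *	struct crypto_comp *tfm = *get_cpu_ptr(pool->tfm);
 *	... crypto_comp_compress()/crypto_comp_decompress() on this CPU ...
 *	put_cpu_ptr(pool->tfm);
 */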
 124
 125/*
 126 * struct zswap_entry
 127 *
 128 * This structure contains the metadata for tracking a single compressed
 129 * page within zswap.
 130 *
  131 * rbnode - links the entry into the red-black tree for the appropriate swap type
  132 * offset - the swap offset for the entry.  Index into the red-black tree.
  133 * refcount - the number of outstanding references to the entry. This is needed
  134 *            to protect against premature freeing of the entry by concurrent
  135 *            calls to load, invalidate, and writeback.  The lock
 136 *            for the zswap_tree structure that contains the entry must
 137 *            be held while changing the refcount.  Since the lock must
 138 *            be held, there is no reason to also make refcount atomic.
 139 * length - the length in bytes of the compressed page data.  Needed during
 140 *          decompression
 
 141 * pool - the zswap_pool the entry's data is in
 142 * handle - zpool allocation handle that stores the compressed page data
 
 
 
 143 */
 144struct zswap_entry {
 145	struct rb_node rbnode;
 146	pgoff_t offset;
 147	int refcount;
 148	unsigned int length;
 149	struct zswap_pool *pool;
 150	unsigned long handle;
 151};
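/*
 * A short sketch of the refcount discipline described above, as used by the
 * load, invalidate and writeback paths later in this file (the tree lock is
 * held for every refcount change):
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);	(refcount++)
 *	spin_unlock(&tree->lock);
 *	... use entry->handle and entry->length ...
 *	spin_lock(&tree->lock);
 *	zswap_entry_put(tree, entry);	(refcount--, freed when it hits 0)
 *	spin_unlock(&tree->lock);
 */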
 152
 153struct zswap_header {
 154	swp_entry_t swpentry;
 
 155};
 156
 157/*
 158 * The tree lock in the zswap_tree struct protects a few things:
 159 * - the rbtree
 160 * - the refcount field of each entry in the tree
 161 */
 162struct zswap_tree {
 163	struct rb_root rbroot;
 164	spinlock_t lock;
 165};
 166
 167static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 
 168
 169/* RCU-protected iteration */
 170static LIST_HEAD(zswap_pools);
 171/* protects zswap_pools list modification */
 172static DEFINE_SPINLOCK(zswap_pools_lock);
 173/* pool counter to provide unique names to zpool */
 174static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 175
 176/* used by param callback function */
 177static bool zswap_init_started;
 178
 179/*********************************
 180* helpers and fwd declarations
 181**********************************/
 182
 183#define zswap_pool_debug(msg, p)				\
 184	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 185		 zpool_get_type((p)->zpool))
 186
 187static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
 188static int zswap_pool_get(struct zswap_pool *pool);
 189static void zswap_pool_put(struct zswap_pool *pool);
 
 
 190
 191static const struct zpool_ops zswap_zpool_ops = {
 192	.evict = zswap_writeback_entry
 193};
 
 
 
 194
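/*
 * Worked example with illustrative numbers: on a machine with 8 GiB of RAM
 * and the default max_pool_percent of 20, zswap_is_full() starts returning
 * true once the compressed pool exceeds roughly 1.6 GiB.
 */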
 195static bool zswap_is_full(void)
 196{
 197	return totalram_pages * zswap_max_pool_percent / 100 <
 198		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 199}
 200
 201static void zswap_update_total_size(void)
 202{
 203	struct zswap_pool *pool;
 204	u64 total = 0;
 205
 206	rcu_read_lock();
 207
 208	list_for_each_entry_rcu(pool, &zswap_pools, list)
 209		total += zpool_get_total_size(pool->zpool);
 210
 211	rcu_read_unlock();
 212
 213	zswap_pool_total_size = total;
 214}
 215
 216/*********************************
 217* zswap entry functions
 218**********************************/
 219static struct kmem_cache *zswap_entry_cache;
 220
 221static int __init zswap_entry_cache_create(void)
 222{
 223	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
 224	return zswap_entry_cache == NULL;
 225}
 
 
 226
 227static void __init zswap_entry_cache_destroy(void)
 228{
 229	kmem_cache_destroy(zswap_entry_cache);
 230}
 231
 232static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
 233{
 234	struct zswap_entry *entry;
 235	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
 236	if (!entry)
 237		return NULL;
 238	entry->refcount = 1;
 239	RB_CLEAR_NODE(&entry->rbnode);
 240	return entry;
 241}
 242
 243static void zswap_entry_cache_free(struct zswap_entry *entry)
 244{
 245	kmem_cache_free(zswap_entry_cache, entry);
 246}
 247
 248/*********************************
 249* rbtree functions
 250**********************************/
 251static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 252{
 253	struct rb_node *node = root->rb_node;
 254	struct zswap_entry *entry;
 255
 256	while (node) {
 257		entry = rb_entry(node, struct zswap_entry, rbnode);
 258		if (entry->offset > offset)
 259			node = node->rb_left;
 260		else if (entry->offset < offset)
 261			node = node->rb_right;
 262		else
 263			return entry;
 264	}
 265	return NULL;
 266}
 267
 268/*
  269 * In the case that an entry with the same offset is found, a pointer to
 270 * the existing entry is stored in dupentry and the function returns -EEXIST
 271 */
 272static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 273			struct zswap_entry **dupentry)
 274{
 275	struct rb_node **link = &root->rb_node, *parent = NULL;
 276	struct zswap_entry *myentry;
 277
 278	while (*link) {
 279		parent = *link;
 280		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 281		if (myentry->offset > entry->offset)
 282			link = &(*link)->rb_left;
 283		else if (myentry->offset < entry->offset)
 284			link = &(*link)->rb_right;
 285		else {
 286			*dupentry = myentry;
 287			return -EEXIST;
 288		}
 289	}
 290	rb_link_node(&entry->rbnode, parent, link);
 291	rb_insert_color(&entry->rbnode, root);
 292	return 0;
 293}
 294
 295static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 296{
 297	if (!RB_EMPTY_NODE(&entry->rbnode)) {
 298		rb_erase(&entry->rbnode, root);
 299		RB_CLEAR_NODE(&entry->rbnode);
 300	}
 301}
 302
 303/*
  304 * Carries out the common pattern of freeing an entry's zpool allocation,
 305 * freeing the entry itself, and decrementing the number of stored pages.
 306 */
 307static void zswap_free_entry(struct zswap_entry *entry)
 308{
 309	zpool_free(entry->pool->zpool, entry->handle);
 310	zswap_pool_put(entry->pool);
 311	zswap_entry_cache_free(entry);
 312	atomic_dec(&zswap_stored_pages);
 313	zswap_update_total_size();
 314}
 315
 316/* caller must hold the tree lock */
 317static void zswap_entry_get(struct zswap_entry *entry)
 318{
 319	entry->refcount++;
 320}
 321
 322/* caller must hold the tree lock
  323* remove from the tree and free it, if nobody references the entry
 324*/
 325static void zswap_entry_put(struct zswap_tree *tree,
 326			struct zswap_entry *entry)
 327{
 328	int refcount = --entry->refcount;
 329
 330	BUG_ON(refcount < 0);
 331	if (refcount == 0) {
 332		zswap_rb_erase(&tree->rbroot, entry);
 333		zswap_free_entry(entry);
 334	}
 
 
 
 
 335}
 336
 337/* caller must hold the tree lock */
 338static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 339				pgoff_t offset)
 340{
 341	struct zswap_entry *entry;
 342
 343	entry = zswap_rb_search(root, offset);
 344	if (entry)
 345		zswap_entry_get(entry);
 346
 347	return entry;
 348}
 349
 350/*********************************
 351* per-cpu code
 352**********************************/
 353static DEFINE_PER_CPU(u8 *, zswap_dstmem);
 354
 355static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
 356{
 357	u8 *dst;
 358
 359	switch (action) {
 360	case CPU_UP_PREPARE:
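		/*
		 * Two pages per CPU, which leaves headroom for compressor
		 * output that can exceed PAGE_SIZE on incompressible data.
		 */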
 361		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 362		if (!dst) {
 363			pr_err("can't allocate compressor buffer\n");
 364			return NOTIFY_BAD;
 365		}
 366		per_cpu(zswap_dstmem, cpu) = dst;
 367		break;
 368	case CPU_DEAD:
 369	case CPU_UP_CANCELED:
 370		dst = per_cpu(zswap_dstmem, cpu);
 371		kfree(dst);
 372		per_cpu(zswap_dstmem, cpu) = NULL;
 373		break;
 374	default:
 375		break;
 376	}
 377	return NOTIFY_OK;
 378}
 379
 380static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
 381				     unsigned long action, void *pcpu)
 382{
 383	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
 384}
 385
 386static struct notifier_block zswap_dstmem_notifier = {
 387	.notifier_call =	zswap_cpu_dstmem_notifier,
 388};
 389
 390static int __init zswap_cpu_dstmem_init(void)
 391{
 392	unsigned long cpu;
 393
 394	cpu_notifier_register_begin();
 395	for_each_online_cpu(cpu)
 396		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
 397		    NOTIFY_BAD)
 398			goto cleanup;
 399	__register_cpu_notifier(&zswap_dstmem_notifier);
 400	cpu_notifier_register_done();
 401	return 0;
 402
 403cleanup:
 404	for_each_online_cpu(cpu)
 405		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
 406	cpu_notifier_register_done();
 407	return -ENOMEM;
 408}
 409
 410static void zswap_cpu_dstmem_destroy(void)
 411{
 412	unsigned long cpu;
 
 413
 414	cpu_notifier_register_begin();
 415	for_each_online_cpu(cpu)
 416		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
 417	__unregister_cpu_notifier(&zswap_dstmem_notifier);
 418	cpu_notifier_register_done();
 419}
 420
 421static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
 422				     unsigned long action, unsigned long cpu)
 423{
 424	struct crypto_comp *tfm;
 425
 426	switch (action) {
 427	case CPU_UP_PREPARE:
 428		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
 429			break;
 430		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
 431		if (IS_ERR_OR_NULL(tfm)) {
 432			pr_err("could not alloc crypto comp %s : %ld\n",
 433			       pool->tfm_name, PTR_ERR(tfm));
 434			return NOTIFY_BAD;
 435		}
 436		*per_cpu_ptr(pool->tfm, cpu) = tfm;
 437		break;
 438	case CPU_DEAD:
 439	case CPU_UP_CANCELED:
 440		tfm = *per_cpu_ptr(pool->tfm, cpu);
 441		if (!IS_ERR_OR_NULL(tfm))
 442			crypto_free_comp(tfm);
 443		*per_cpu_ptr(pool->tfm, cpu) = NULL;
 444		break;
 445	default:
 446		break;
 447	}
 448	return NOTIFY_OK;
 449}
 450
 451static int zswap_cpu_comp_notifier(struct notifier_block *nb,
 452				   unsigned long action, void *pcpu)
 
 453{
 454	unsigned long cpu = (unsigned long)pcpu;
 455	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
 456
 457	return __zswap_cpu_comp_notifier(pool, action, cpu);
 458}
 459
 460static int zswap_cpu_comp_init(struct zswap_pool *pool)
 461{
 462	unsigned long cpu;
 463
 464	memset(&pool->notifier, 0, sizeof(pool->notifier));
 465	pool->notifier.notifier_call = zswap_cpu_comp_notifier;
 466
 467	cpu_notifier_register_begin();
 468	for_each_online_cpu(cpu)
 469		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
 470		    NOTIFY_BAD)
 471			goto cleanup;
 472	__register_cpu_notifier(&pool->notifier);
 473	cpu_notifier_register_done();
 474	return 0;
 475
 476cleanup:
 477	for_each_online_cpu(cpu)
 478		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
 479	cpu_notifier_register_done();
 480	return -ENOMEM;
 481}
 482
 483static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
 484{
 485	unsigned long cpu;
 
 486
 487	cpu_notifier_register_begin();
 488	for_each_online_cpu(cpu)
 489		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
 490	__unregister_cpu_notifier(&pool->notifier);
 491	cpu_notifier_register_done();
 492}
 493
 494/*********************************
 495* pool functions
 496**********************************/
 
 497
 498static struct zswap_pool *__zswap_pool_current(void)
 499{
 500	struct zswap_pool *pool;
 501
 502	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 503	WARN_ON(!pool);
 
 504
 505	return pool;
 506}
 507
 508static struct zswap_pool *zswap_pool_current(void)
 509{
 510	assert_spin_locked(&zswap_pools_lock);
 511
 512	return __zswap_pool_current();
 513}
 514
 515static struct zswap_pool *zswap_pool_current_get(void)
 516{
 517	struct zswap_pool *pool;
 518
 519	rcu_read_lock();
 520
 521	pool = __zswap_pool_current();
 522	if (!pool || !zswap_pool_get(pool))
 523		pool = NULL;
 524
 525	rcu_read_unlock();
 526
 527	return pool;
 528}
 529
 530static struct zswap_pool *zswap_pool_last_get(void)
 531{
 532	struct zswap_pool *pool, *last = NULL;
 533
 534	rcu_read_lock();
 535
 536	list_for_each_entry_rcu(pool, &zswap_pools, list)
 537		last = pool;
 538	if (!WARN_ON(!last) && !zswap_pool_get(last))
 539		last = NULL;
 540
 541	rcu_read_unlock();
 542
 543	return last;
 544}
 545
 546/* type and compressor must be null-terminated */
 547static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 548{
 549	struct zswap_pool *pool;
 550
 551	assert_spin_locked(&zswap_pools_lock);
 552
 553	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 554		if (strcmp(pool->tfm_name, compressor))
 555			continue;
 556		if (strcmp(zpool_get_type(pool->zpool), type))
 
 557			continue;
 558		/* if we can't get it, it's about to be destroyed */
 559		if (!zswap_pool_get(pool))
 560			continue;
 561		return pool;
 562	}
 563
 564	return NULL;
 565}
 566
 567static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 568{
 569	struct zswap_pool *pool;
 570	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 571	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 572
 573	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 574	if (!pool) {
 575		pr_err("pool alloc failed\n");
 576		return NULL;
 577	}
 578
 579	/* unique name for each pool specifically required by zsmalloc */
 580	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
 581
 582	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 583	if (!pool->zpool) {
 584		pr_err("%s zpool not available\n", type);
 585		goto error;
 586	}
 587	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
 588
 589	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 590	pool->tfm = alloc_percpu(struct crypto_comp *);
 591	if (!pool->tfm) {
 592		pr_err("percpu alloc failed\n");
 593		goto error;
 594	}
 595
 596	if (zswap_cpu_comp_init(pool))
 597		goto error;
 598	pr_debug("using %s compressor\n", pool->tfm_name);
 599
 600	/* being the current pool takes 1 ref; this func expects the
 601	 * caller to always add the new pool as the current pool
 602	 */
 603	kref_init(&pool->kref);
 604	INIT_LIST_HEAD(&pool->list);
 605
 606	zswap_pool_debug("created", pool);
 607
 608	return pool;
 609
 610error:
 611	free_percpu(pool->tfm);
 612	if (pool->zpool)
 613		zpool_destroy_pool(pool->zpool);
 614	kfree(pool);
 615	return NULL;
 616}
 617
 618static __init struct zswap_pool *__zswap_pool_create_fallback(void)
 619{
 620	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
 621		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
 622			pr_err("default compressor %s not available\n",
 623			       zswap_compressor);
 624			return NULL;
 625		}
 626		pr_err("compressor %s not available, using default %s\n",
 627		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
 628		param_free_charp(&zswap_compressor);
 629		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
 630	}
 631	if (!zpool_has_pool(zswap_zpool_type)) {
 632		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
 633			pr_err("default zpool %s not available\n",
 634			       zswap_zpool_type);
 635			return NULL;
 636		}
 637		pr_err("zpool %s not available, using default %s\n",
 638		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
 639		param_free_charp(&zswap_zpool_type);
 640		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
 641	}
 642
 643	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 644}
 645
 646static void zswap_pool_destroy(struct zswap_pool *pool)
 647{
 648	zswap_pool_debug("destroying", pool);
 649
 650	zswap_cpu_comp_destroy(pool);
 651	free_percpu(pool->tfm);
 652	zpool_destroy_pool(pool->zpool);
 653	kfree(pool);
 654}
 655
 656static int __must_check zswap_pool_get(struct zswap_pool *pool)
 657{
 658	return kref_get_unless_zero(&pool->kref);
 659}
 660
 661static void __zswap_pool_release(struct rcu_head *head)
 662{
 663	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);
 664
 665	/* nobody should have been able to get a kref... */
 666	WARN_ON(kref_get_unless_zero(&pool->kref));
 667
 668	/* pool is now off zswap_pools list and has no references. */
 669	zswap_pool_destroy(pool);
 670}
 671
 672static void __zswap_pool_empty(struct kref *kref)
 673{
 674	struct zswap_pool *pool;
 675
 676	pool = container_of(kref, typeof(*pool), kref);
 677
 678	spin_lock(&zswap_pools_lock);
 679
 680	WARN_ON(pool == zswap_pool_current());
 681
 682	list_del_rcu(&pool->list);
 683	call_rcu(&pool->rcu_head, __zswap_pool_release);
 684
 685	spin_unlock(&zswap_pools_lock);
 686}
 687
 688static void zswap_pool_put(struct zswap_pool *pool)
 689{
 690	kref_put(&pool->kref, __zswap_pool_empty);
 691}
 692
 693/*********************************
 694* param callbacks
 695**********************************/
 696
 697/* val must be a null-terminated string */
 698static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 699			     char *type, char *compressor)
 700{
 701	struct zswap_pool *pool, *put_pool = NULL;
 702	char *s = strstrip((char *)val);
 703	int ret;
 
 704
 705	/* no change required */
 706	if (!strcmp(s, *(char **)kp->arg))
 707		return 0;
 708
 709	/* if this is load-time (pre-init) param setting,
 710	 * don't create a pool; that's done during init.
 711	 */
 712	if (!zswap_init_started)
 713		return param_set_charp(s, kp);
 714
 715	if (!type) {
 716		if (!zpool_has_pool(s)) {
 717			pr_err("zpool %s not available\n", s);
 718			return -ENOENT;
 719		}
 720		type = s;
 721	} else if (!compressor) {
 722		if (!crypto_has_comp(s, 0, 0)) {
 723			pr_err("compressor %s not available\n", s);
 724			return -ENOENT;
 725		}
 726		compressor = s;
 727	} else {
 728		WARN_ON(1);
 729		return -EINVAL;
 730	}
 731
 732	spin_lock(&zswap_pools_lock);
 733
 734	pool = zswap_pool_find_get(type, compressor);
 735	if (pool) {
 736		zswap_pool_debug("using existing", pool);
 
 737		list_del_rcu(&pool->list);
 738	} else {
 739		spin_unlock(&zswap_pools_lock);
 
 
 
 740		pool = zswap_pool_create(type, compressor);
 741		spin_lock(&zswap_pools_lock);
 742	}
 743
 744	if (pool)
 745		ret = param_set_charp(s, kp);
 746	else
 747		ret = -EINVAL;
 748
 
 
 749	if (!ret) {
 750		put_pool = zswap_pool_current();
 751		list_add_rcu(&pool->list, &zswap_pools);
 
 752	} else if (pool) {
 753		/* add the possibly pre-existing pool to the end of the pools
 754		 * list; if it's new (and empty) then it'll be removed and
 755		 * destroyed by the put after we drop the lock
 756		 */
 757		list_add_tail_rcu(&pool->list, &zswap_pools);
 758		put_pool = pool;
 759	}
 760
 761	spin_unlock(&zswap_pools_lock);
 762
 763	/* drop the ref from either the old current pool,
 764	 * or the new pool we failed to add
 765	 */
 766	if (put_pool)
 767		zswap_pool_put(put_pool);
 768
 769	return ret;
 770}
 771
 772static int zswap_compressor_param_set(const char *val,
 773				      const struct kernel_param *kp)
 774{
 775	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
 776}
 777
 778static int zswap_zpool_param_set(const char *val,
 779				 const struct kernel_param *kp)
 780{
 781	return __zswap_param_set(val, kp, NULL, zswap_compressor);
 782}
 783
 784/*********************************
 785* writeback code
 786**********************************/
 787/* return enum for zswap_get_swap_cache_page */
 788enum zswap_get_swap_ret {
 789	ZSWAP_SWAPCACHE_NEW,
 790	ZSWAP_SWAPCACHE_EXIST,
 791	ZSWAP_SWAPCACHE_FAIL,
 792};
 793
 794/*
 795 * zswap_get_swap_cache_page
 796 *
 797 * This is an adaption of read_swap_cache_async()
 798 *
 799 * This function tries to find a page with the given swap entry
 800 * in the swapper_space address space (the swap cache).  If the page
 801 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 802 * added to the swap cache, and returned in retpage.
 803 *
  804 * On success, the swap cache page is returned in retpage
 805 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 806 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 807 *     the new page is added to swapcache and locked
 808 * Returns ZSWAP_SWAPCACHE_FAIL on error
 809 */
 810static int zswap_get_swap_cache_page(swp_entry_t entry,
 811				struct page **retpage)
 812{
 813	bool page_was_allocated;
 814
 815	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
 816			NULL, 0, &page_was_allocated);
 817	if (page_was_allocated)
 818		return ZSWAP_SWAPCACHE_NEW;
 819	if (!*retpage)
 820		return ZSWAP_SWAPCACHE_FAIL;
 821	return ZSWAP_SWAPCACHE_EXIST;
 822}
 823
 824/*
 825 * Attempts to free an entry by adding a page to the swap cache,
 826 * decompressing the entry data into the page, and issuing a
 827 * bio write to write the page back to the swap device.
 828 *
 829 * This can be thought of as a "resumed writeback" of the page
 830 * to the swap device.  We are basically resuming the same swap
 831 * writeback path that was intercepted with the frontswap_store()
 832 * in the first place.  After the page has been decompressed into
 833 * the swap cache, the compressed version stored by zswap can be
 834 * freed.
 835 */
 836static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 
 837{
 838	struct zswap_header *zhdr;
 839	swp_entry_t swpentry;
 840	struct zswap_tree *tree;
 841	pgoff_t offset;
 842	struct zswap_entry *entry;
 843	struct page *page;
 844	struct crypto_comp *tfm;
 845	u8 *src, *dst;
 846	unsigned int dlen;
 847	int ret;
 848	struct writeback_control wbc = {
 849		.sync_mode = WB_SYNC_NONE,
 850	};
 851
 852	/* extract swpentry from data */
 853	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 854	swpentry = zhdr->swpentry; /* here */
 855	zpool_unmap_handle(pool, handle);
 856	tree = zswap_trees[swp_type(swpentry)];
 857	offset = swp_offset(swpentry);
 858
 859	/* find and ref zswap entry */
 860	spin_lock(&tree->lock);
 861	entry = zswap_entry_find_get(&tree->rbroot, offset);
 862	if (!entry) {
 863		/* entry was invalidated */
 864		spin_unlock(&tree->lock);
 865		return 0;
 
 
 
 866	}
 
 
 
 867	spin_unlock(&tree->lock);
 868	BUG_ON(offset != entry->offset);
 869
 870	/* try to allocate swap cache page */
 871	switch (zswap_get_swap_cache_page(swpentry, &page)) {
 872	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
 873		ret = -ENOMEM;
 874		goto fail;
 875
 876	case ZSWAP_SWAPCACHE_EXIST:
 877		/* page is already in the swap cache, ignore for now */
 878		put_page(page);
 879		ret = -EEXIST;
 880		goto fail;
 881
 882	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 883		/* decompress */
 884		dlen = PAGE_SIZE;
 885		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
 886				ZPOOL_MM_RO) + sizeof(struct zswap_header);
 887		dst = kmap_atomic(page);
 888		tfm = *get_cpu_ptr(entry->pool->tfm);
 889		ret = crypto_comp_decompress(tfm, src, entry->length,
 890					     dst, &dlen);
 891		put_cpu_ptr(entry->pool->tfm);
 892		kunmap_atomic(dst);
 893		zpool_unmap_handle(entry->pool->zpool, entry->handle);
 894		BUG_ON(ret);
 895		BUG_ON(dlen != PAGE_SIZE);
 896
 897		/* page is up to date */
 898		SetPageUptodate(page);
 899	}
 
 900
 901	/* move it to the tail of the inactive list after end_writeback */
 902	SetPageReclaim(page);
 903
 904	/* start writeback */
 905	__swap_writepage(page, &wbc, end_swap_bio_write);
 906	put_page(page);
 907	zswap_written_back_pages++;
 908
 909	spin_lock(&tree->lock);
 910	/* drop local reference */
 911	zswap_entry_put(tree, entry);
 912
 913	/*
 914	* There are two possible situations for entry here:
  915	* (1) refcount is 1 (normal case), entry is valid and on the tree
 916	* (2) refcount is 0, entry is freed and not on the tree
 917	*     because invalidate happened during writeback
  918	*  search the tree and free the entry if we find it
 919	*/
 920	if (entry == zswap_rb_search(&tree->rbroot, offset))
 921		zswap_entry_put(tree, entry);
 922	spin_unlock(&tree->lock);
 923
 924	goto end;
 925
 926	/*
 927	* if we get here due to ZSWAP_SWAPCACHE_EXIST
  928	* a load may be happening concurrently
 929	* it is safe and okay to not free the entry
 930	* if we free the entry in the following put
 931	* it it either okay to return !0
 932	*/
 933fail:
 934	spin_lock(&tree->lock);
 935	zswap_entry_put(tree, entry);
 936	spin_unlock(&tree->lock);
 937
 938end:
 939	return ret;
 940}
 941
 942static int zswap_shrink(void)
 
 943{
 944	struct zswap_pool *pool;
 945	int ret;
 946
 947	pool = zswap_pool_last_get();
 948	if (!pool)
 949		return -ENOENT;
 950
 951	ret = zpool_shrink(pool->zpool, 1, NULL);
 952
 953	zswap_pool_put(pool);
 
 
 
 954
 955	return ret;
 956}
 957
 958/*********************************
 959* frontswap hooks
 960**********************************/
  961/* attempts to compress and store a single page */
 962static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 963				struct page *page)
 964{
 965	struct zswap_tree *tree = zswap_trees[type];
 
 
 966	struct zswap_entry *entry, *dupentry;
 967	struct crypto_comp *tfm;
 968	int ret;
 969	unsigned int dlen = PAGE_SIZE, len;
 970	unsigned long handle;
 971	char *buf;
 972	u8 *src, *dst;
 973	struct zswap_header *zhdr;
 974
 975	if (!zswap_enabled || !tree) {
 976		ret = -ENODEV;
 977		goto reject;
 978	}
 979
 980	/* reclaim space if needed */
 981	if (zswap_is_full()) {
 982		zswap_pool_limit_hit++;
 983		if (zswap_shrink()) {
 984			zswap_reject_reclaim_fail++;
 985			ret = -ENOMEM;
 986			goto reject;
 987		}
 
 
 
 
 988	}
 989
 990	/* allocate entry */
 991	entry = zswap_entry_cache_alloc(GFP_KERNEL);
 992	if (!entry) {
 993		zswap_reject_kmemcache_fail++;
 994		ret = -ENOMEM;
 995		goto reject;
 996	}
 997
 998	/* if entry is successfully added, it keeps the reference */
 999	entry->pool = zswap_pool_current_get();
1000	if (!entry->pool) {
1001		ret = -EINVAL;
1002		goto freepage;
1003	}
1004
1005	/* compress */
1006	dst = get_cpu_var(zswap_dstmem);
1007	tfm = *get_cpu_ptr(entry->pool->tfm);
1008	src = kmap_atomic(page);
1009	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
1010	kunmap_atomic(src);
1011	put_cpu_ptr(entry->pool->tfm);
1012	if (ret) {
1013		ret = -EINVAL;
1014		goto put_dstmem;
1015	}
1016
1017	/* store */
1018	len = dlen + sizeof(struct zswap_header);
1019	ret = zpool_malloc(entry->pool->zpool, len,
1020			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
1021			   &handle);
1022	if (ret == -ENOSPC) {
1023		zswap_reject_compress_poor++;
1024		goto put_dstmem;
1025	}
1026	if (ret) {
1027		zswap_reject_alloc_fail++;
1028		goto put_dstmem;
1029	}
1030	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
1031	zhdr->swpentry = swp_entry(type, offset);
1032	buf = (u8 *)(zhdr + 1);
1033	memcpy(buf, dst, dlen);
1034	zpool_unmap_handle(entry->pool->zpool, handle);
1035	put_cpu_var(zswap_dstmem);
1036
1037	/* populate entry */
1038	entry->offset = offset;
1039	entry->handle = handle;
1040	entry->length = dlen;
 
 
 
 
1041
1042	/* map */
1043	spin_lock(&tree->lock);
1044	do {
1045		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
1046		if (ret == -EEXIST) {
1047			zswap_duplicate_entry++;
1048			/* remove from rbtree */
1049			zswap_rb_erase(&tree->rbroot, dupentry);
1050			zswap_entry_put(tree, dupentry);
1051		}
1052	} while (ret == -EEXIST);
 
 
 
 
1053	spin_unlock(&tree->lock);
1054
1055	/* update stats */
1056	atomic_inc(&zswap_stored_pages);
1057	zswap_update_total_size();
 
1058
1059	return 0;
1060
1061put_dstmem:
1062	put_cpu_var(zswap_dstmem);
1063	zswap_pool_put(entry->pool);
1064freepage:
1065	zswap_entry_cache_free(entry);
1066reject:
1067	return ret;
1068}
1069
1070/*
1071 * returns 0 if the page was successfully decompressed
1072 * return -1 on entry not found or error
1073*/
1074static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1075				struct page *page)
1076{
1077	struct zswap_tree *tree = zswap_trees[type];
 
 
 
 
1078	struct zswap_entry *entry;
1079	struct crypto_comp *tfm;
1080	u8 *src, *dst;
1081	unsigned int dlen;
1082	int ret;
1083
1084	/* find */
1085	spin_lock(&tree->lock);
1086	entry = zswap_entry_find_get(&tree->rbroot, offset);
1087	if (!entry) {
1088		/* entry was written back */
1089		spin_unlock(&tree->lock);
1090		return -1;
1091	}
1092	spin_unlock(&tree->lock);
1093
1094	/* decompress */
1095	dlen = PAGE_SIZE;
1096	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
1097			ZPOOL_MM_RO) + sizeof(struct zswap_header);
1098	dst = kmap_atomic(page);
1099	tfm = *get_cpu_ptr(entry->pool->tfm);
1100	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
1101	put_cpu_ptr(entry->pool->tfm);
1102	kunmap_atomic(dst);
1103	zpool_unmap_handle(entry->pool->zpool, entry->handle);
1104	BUG_ON(ret);
1105
1106	spin_lock(&tree->lock);
1107	zswap_entry_put(tree, entry);
1108	spin_unlock(&tree->lock);
1109
1110	return 0;
1111}
1112
1113/* frees an entry in zswap */
1114static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
1115{
1116	struct zswap_tree *tree = zswap_trees[type];
 
1117	struct zswap_entry *entry;
1118
1119	/* find */
1120	spin_lock(&tree->lock);
1121	entry = zswap_rb_search(&tree->rbroot, offset);
1122	if (!entry) {
1123		/* entry was written back */
1124		spin_unlock(&tree->lock);
1125		return;
1126	}
1127
1128	/* remove from rbtree */
1129	zswap_rb_erase(&tree->rbroot, entry);
1130
1131	/* drop the initial reference from entry creation */
1132	zswap_entry_put(tree, entry);
1133
1134	spin_unlock(&tree->lock);
1135}
1136
1137/* frees all zswap entries for the given swap type */
1138static void zswap_frontswap_invalidate_area(unsigned type)
1139{
1140	struct zswap_tree *tree = zswap_trees[type];
1141	struct zswap_entry *entry, *n;
1142
1143	if (!tree)
1144		return;
 
 
 
 
1145
1146	/* walk the tree and free everything */
1147	spin_lock(&tree->lock);
1148	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1149		zswap_free_entry(entry);
1150	tree->rbroot = RB_ROOT;
1151	spin_unlock(&tree->lock);
1152	kfree(tree);
1153	zswap_trees[type] = NULL;
 
1154}
1155
1156static void zswap_frontswap_init(unsigned type)
1157{
1158	struct zswap_tree *tree;
 
1159
1160	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
1161	if (!tree) {
1162		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1163		return;
1164	}
1165
1166	tree->rbroot = RB_ROOT;
1167	spin_lock_init(&tree->lock);
1168	zswap_trees[type] = tree;
1169}
1170
1171static struct frontswap_ops zswap_frontswap_ops = {
1172	.store = zswap_frontswap_store,
1173	.load = zswap_frontswap_load,
1174	.invalidate_page = zswap_frontswap_invalidate_page,
1175	.invalidate_area = zswap_frontswap_invalidate_area,
1176	.init = zswap_frontswap_init
1177};
1178
1179/*********************************
1180* debugfs functions
1181**********************************/
1182#ifdef CONFIG_DEBUG_FS
1183#include <linux/debugfs.h>
1184
1185static struct dentry *zswap_debugfs_root;
1186
1187static int __init zswap_debugfs_init(void)
1188{
1189	if (!debugfs_initialized())
1190		return -ENODEV;
1191
1192	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1193	if (!zswap_debugfs_root)
1194		return -ENOMEM;
1195
1196	debugfs_create_u64("pool_limit_hit", S_IRUGO,
1197			zswap_debugfs_root, &zswap_pool_limit_hit);
1198	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
1199			zswap_debugfs_root, &zswap_reject_reclaim_fail);
1200	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
1201			zswap_debugfs_root, &zswap_reject_alloc_fail);
1202	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
1203			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1204	debugfs_create_u64("reject_compress_poor", S_IRUGO,
1205			zswap_debugfs_root, &zswap_reject_compress_poor);
1206	debugfs_create_u64("written_back_pages", S_IRUGO,
1207			zswap_debugfs_root, &zswap_written_back_pages);
1208	debugfs_create_u64("duplicate_entry", S_IRUGO,
1209			zswap_debugfs_root, &zswap_duplicate_entry);
1210	debugfs_create_u64("pool_total_size", S_IRUGO,
1211			zswap_debugfs_root, &zswap_pool_total_size);
1212	debugfs_create_atomic_t("stored_pages", S_IRUGO,
1213			zswap_debugfs_root, &zswap_stored_pages);
 
 
1214
1215	return 0;
1216}
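/*
 * With debugfs mounted in its usual location, the counters registered above
 * appear under /sys/kernel/debug/zswap/ (the directory name comes from the
 * debugfs_create_dir() call), e.g.
 *
 *   cat /sys/kernel/debug/zswap/pool_total_size
 *   cat /sys/kernel/debug/zswap/stored_pages
 */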
1217
1218static void __exit zswap_debugfs_exit(void)
1219{
1220	debugfs_remove_recursive(zswap_debugfs_root);
1221}
1222#else
1223static int __init zswap_debugfs_init(void)
1224{
1225	return 0;
1226}
1227
1228static void __exit zswap_debugfs_exit(void) { }
1229#endif
1230
1231/*********************************
1232* module init and exit
1233**********************************/
1234static int __init init_zswap(void)
1235{
1236	struct zswap_pool *pool;
 
1237
1238	zswap_init_started = true;
1239
1240	if (zswap_entry_cache_create()) {
1241		pr_err("entry cache creation failed\n");
1242		goto cache_fail;
1243	}
1244
1245	if (zswap_cpu_dstmem_init()) {
1246		pr_err("dstmem alloc failed\n");
1247		goto dstmem_fail;
1248	}
1249
1250	pool = __zswap_pool_create_fallback();
1251	if (!pool) {
1252		pr_err("pool creation failed\n");
1253		goto pool_fail;
1254	}
1255	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1256		zpool_get_type(pool->zpool));
1257
1258	list_add(&pool->list, &zswap_pools);
1259
1260	frontswap_register_ops(&zswap_frontswap_ops);
1261	if (zswap_debugfs_init())
1262		pr_warn("debugfs initialization failed\n");
 
1263	return 0;
1264
1265pool_fail:
1266	zswap_cpu_dstmem_destroy();
1267dstmem_fail:
1268	zswap_entry_cache_destroy();
 
 
 
 
1269cache_fail:
 
 
 
1270	return -ENOMEM;
1271}
1272/* must be late so crypto has time to come up */
1273late_initcall(init_zswap);
1274
1275MODULE_LICENSE("GPL");
1276MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1277MODULE_DESCRIPTION("Compressed cache for swap pages");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * zswap.c - zswap driver file
   4 *
   5 * zswap is a cache that takes pages that are in the process
   6 * of being swapped out and attempts to compress and store them in a
   7 * RAM-based memory pool.  This can result in a significant I/O reduction on
   8 * the swap device and, in the case where decompressing from RAM is faster
   9 * than reading from the swap device, can also improve workload performance.
  10 *
  11 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  12*/
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/cpu.h>
  18#include <linux/highmem.h>
  19#include <linux/slab.h>
  20#include <linux/spinlock.h>
  21#include <linux/types.h>
  22#include <linux/atomic.h>
 
  23#include <linux/rbtree.h>
  24#include <linux/swap.h>
  25#include <linux/crypto.h>
  26#include <linux/scatterlist.h>
  27#include <linux/mempolicy.h>
  28#include <linux/mempool.h>
  29#include <linux/zpool.h>
  30#include <crypto/acompress.h>
  31#include <linux/zswap.h>
  32#include <linux/mm_types.h>
  33#include <linux/page-flags.h>
  34#include <linux/swapops.h>
  35#include <linux/writeback.h>
  36#include <linux/pagemap.h>
  37#include <linux/workqueue.h>
  38#include <linux/list_lru.h>
  39
  40#include "swap.h"
  41#include "internal.h"
  42
  43/*********************************
  44* statistics
  45**********************************/
  46/* Total bytes used by the compressed storage */
  47u64 zswap_pool_total_size;
  48/* The number of compressed pages currently stored in zswap */
  49atomic_t zswap_stored_pages = ATOMIC_INIT(0);
  50/* The number of same-value filled pages currently stored in zswap */
  51static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
  52
  53/*
  54 * The statistics below are not protected from concurrent access for
   55 * performance reasons, so they may not be 100% accurate.  However,
  56 * they do provide useful information on roughly how many times a
  57 * certain event is occurring.
  58*/
  59
  60/* Pool limit was hit (see zswap_max_pool_percent) */
  61static u64 zswap_pool_limit_hit;
  62/* Pages written back when pool limit was reached */
  63static u64 zswap_written_back_pages;
  64/* Store failed due to a reclaim failure after pool limit was reached */
  65static u64 zswap_reject_reclaim_fail;
  66/* Store failed due to compression algorithm failure */
  67static u64 zswap_reject_compress_fail;
  68/* Compressed page was too big for the allocator to (optimally) store */
  69static u64 zswap_reject_compress_poor;
  70/* Store failed because underlying allocator could not get memory */
  71static u64 zswap_reject_alloc_fail;
  72/* Store failed because the entry metadata could not be allocated (rare) */
  73static u64 zswap_reject_kmemcache_fail;
  74
  75/* Shrinker work queue */
  76static struct workqueue_struct *shrink_wq;
  77/* Pool limit was hit, we need to calm down */
  78static bool zswap_pool_reached_full;
  79
  80/*********************************
  81* tunables
  82**********************************/
  83
  84#define ZSWAP_PARAM_UNSET ""
  85
  86static int zswap_setup(void);
  87
  88/* Enable/disable zswap */
  89static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
  90static int zswap_enabled_param_set(const char *,
  91				   const struct kernel_param *);
  92static const struct kernel_param_ops zswap_enabled_param_ops = {
  93	.set =		zswap_enabled_param_set,
  94	.get =		param_get_bool,
  95};
  96module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
  97
  98/* Crypto compressor to use */
  99static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 
 100static int zswap_compressor_param_set(const char *,
 101				      const struct kernel_param *);
 102static const struct kernel_param_ops zswap_compressor_param_ops = {
 103	.set =		zswap_compressor_param_set,
 104	.get =		param_get_charp,
 105	.free =		param_free_charp,
 106};
 107module_param_cb(compressor, &zswap_compressor_param_ops,
 108		&zswap_compressor, 0644);
 109
 110/* Compressed storage zpool to use */
 111static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 
 112static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 113static const struct kernel_param_ops zswap_zpool_param_ops = {
 114	.set =		zswap_zpool_param_set,
 115	.get =		param_get_charp,
 116	.free =		param_free_charp,
 117};
 118module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 119
 120/* The maximum percentage of memory that the compressed pool can occupy */
 121static unsigned int zswap_max_pool_percent = 20;
 122module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 123
 124/* The threshold for accepting new pages after the max_pool_percent was hit */
 125static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
 126module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
 127		   uint, 0644);
 128
 129/*
 130 * Enable/disable handling same-value filled pages (enabled by default).
 131 * If disabled every page is considered non-same-value filled.
 132 */
 133static bool zswap_same_filled_pages_enabled = true;
 134module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
 135		   bool, 0644);
 136
 137/* Enable/disable handling non-same-value filled pages (enabled by default) */
 138static bool zswap_non_same_filled_pages_enabled = true;
 139module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
 140		   bool, 0644);
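/*
 * A same-value filled page is one whose every word holds the same value, a
 * page of zeroes being the typical case.  For such pages zswap stores only
 * that value in the entry (see the handle/value union in struct zswap_entry
 * below) instead of compressing the page.
 */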
 141
 142/* Number of zpools in zswap_pool (empirically determined for scalability) */
 143#define ZSWAP_NR_ZPOOLS 32
 144
 145/* Enable/disable memory pressure-based shrinker. */
 146static bool zswap_shrinker_enabled = IS_ENABLED(
 147		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
 148module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
 149
 150bool is_zswap_enabled(void)
 151{
 152	return zswap_enabled;
 153}
 154
 155/*********************************
 156* data structures
 157**********************************/
 158
 159struct crypto_acomp_ctx {
 160	struct crypto_acomp *acomp;
 161	struct acomp_req *req;
 162	struct crypto_wait wait;
 163	u8 *buffer;
 164	struct mutex mutex;
 165	bool is_sleepable;
 166};
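/*
 * Rough sketch (not the literal code below) of how this per-CPU context is
 * used: the caller takes ctx->mutex, describes the source and destination
 * buffers to ctx->req with acomp_request_set_params(), and then waits for
 * the possibly asynchronous compressor with
 *
 *	crypto_wait_req(crypto_acomp_compress(ctx->req), &ctx->wait);
 *
 * Decompression follows the same pattern via crypto_acomp_decompress().
 */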
 167
 168/*
 169 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 170 * The only case where lru_lock is not acquired while holding tree.lock is
 171 * when a zswap_entry is taken off the lru for writeback, in that case it
 172 * needs to be verified that it's still valid in the tree.
 173 */
 174struct zswap_pool {
 175	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 176	struct crypto_acomp_ctx __percpu *acomp_ctx;
 177	struct percpu_ref ref;
 178	struct list_head list;
 179	struct work_struct release_work;
 180	struct hlist_node node;
 181	char tfm_name[CRYPTO_MAX_ALG_NAME];
 182};
 183
 184/* Global LRU lists shared by all zswap pools. */
 185static struct list_lru zswap_list_lru;
 186/* counter of pages stored in all zswap pools. */
 187static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
 188
 189/* The lock protects zswap_next_shrink updates. */
 190static DEFINE_SPINLOCK(zswap_shrink_lock);
 191static struct mem_cgroup *zswap_next_shrink;
 192static struct work_struct zswap_shrink_work;
 193static struct shrinker *zswap_shrinker;
 194
 195/*
 196 * struct zswap_entry
 197 *
 198 * This structure contains the metadata for tracking a single compressed
 199 * page within zswap.
 200 *
  201 * rbnode - links the entry into the red-black tree for the appropriate swap type
 202 * swpentry - associated swap entry, the offset indexes into the red-black tree
 203 * length - the length in bytes of the compressed page data.  Needed during
 204 *          decompression. For a same value filled page length is 0, and both
 205 *          pool and lru are invalid and must be ignored.
 206 * pool - the zswap_pool the entry's data is in
 207 * handle - zpool allocation handle that stores the compressed page data
 208 * value - value of the same-value filled pages which have same content
 209 * objcg - the obj_cgroup that the compressed memory is charged to
 210 * lru - handle to the pool's lru used to evict pages.
 211 */
 212struct zswap_entry {
 213	struct rb_node rbnode;
 214	swp_entry_t swpentry;
 
 215	unsigned int length;
 216	struct zswap_pool *pool;
 217	union {
 218		unsigned long handle;
 219		unsigned long value;
 220	};
 221	struct obj_cgroup *objcg;
 222	struct list_head lru;
 223};
 224
 225struct zswap_tree {
 226	struct rb_root rbroot;
 227	spinlock_t lock;
 228};
 229
 230static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 231static unsigned int nr_zswap_trees[MAX_SWAPFILES];
 232
 233/* RCU-protected iteration */
 234static LIST_HEAD(zswap_pools);
 235/* protects zswap_pools list modification */
 236static DEFINE_SPINLOCK(zswap_pools_lock);
 237/* pool counter to provide unique names to zpool */
 238static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 239
 240enum zswap_init_type {
 241	ZSWAP_UNINIT,
 242	ZSWAP_INIT_SUCCEED,
 243	ZSWAP_INIT_FAILED
 244};
 245
 246static enum zswap_init_type zswap_init_state;
 247
 248/* used to ensure the integrity of initialization */
 249static DEFINE_MUTEX(zswap_init_lock);
 250
 251/* init completed, but couldn't create the initial pool */
 252static bool zswap_has_pool;
 253
 254/*********************************
 255* helpers and fwd declarations
 256**********************************/
 257
 258static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
 259{
 260	return &zswap_trees[swp_type(swp)][swp_offset(swp)
 261		>> SWAP_ADDRESS_SPACE_SHIFT];
 262}
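/*
 * Each swap device is covered by an array of trees rather than a single one:
 * consecutive ranges of swap offsets (SWAP_ADDRESS_SPACE_PAGES slots each,
 * the same granularity as the swap cache address spaces) map to separate
 * rbtree/lock pairs, which spreads out tree lock contention.
 */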
 263
 264#define zswap_pool_debug(msg, p)				\
 265	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 266		 zpool_get_type((p)->zpools[0]))
 267
 268static bool zswap_is_full(void)
 269{
 270	return totalram_pages() * zswap_max_pool_percent / 100 <
 271			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 272}
 273
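/*
 * Illustrative numbers: with the defaults of max_pool_percent = 20 and
 * accept_thr_percent = 90, once the pool limit has been hit new stores are
 * refused until usage falls back below 90% of the cap, i.e. about 18% of RAM.
 */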
 274static bool zswap_can_accept(void)
 275{
 276	return totalram_pages() * zswap_accept_thr_percent / 100 *
 277				zswap_max_pool_percent / 100 >
 278			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 279}
 280
 281static u64 get_zswap_pool_size(struct zswap_pool *pool)
 282{
 283	u64 pool_size = 0;
 284	int i;
 285
 286	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 287		pool_size += zpool_get_total_size(pool->zpools[i]);
 288
 289	return pool_size;
 290}
 291
 292static void zswap_update_total_size(void)
 293{
 294	struct zswap_pool *pool;
 295	u64 total = 0;
 296
 297	rcu_read_lock();
 298
 299	list_for_each_entry_rcu(pool, &zswap_pools, list)
 300		total += get_zswap_pool_size(pool);
 301
 302	rcu_read_unlock();
 303
 304	zswap_pool_total_size = total;
 305}
 306
 307/*********************************
 308* pool functions
 309**********************************/
 310static void __zswap_pool_empty(struct percpu_ref *ref);
 311
 312static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 313{
 314	int i;
 315	struct zswap_pool *pool;
 316	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 317	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 318	int ret;
 319
 320	if (!zswap_has_pool) {
  321		/* if either is unset, pool initialization failed, and we
 322		 * need both params to be set correctly before trying to
 323		 * create a pool.
 324		 */
 325		if (!strcmp(type, ZSWAP_PARAM_UNSET))
 326			return NULL;
 327		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
 328			return NULL;
 329	}
 330
 331	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 332	if (!pool)
 
 
 
 333		return NULL;
 
 
 
 
 334
 335	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
 336		/* unique name for each pool specifically required by zsmalloc */
 337		snprintf(name, 38, "zswap%x",
 338			 atomic_inc_return(&zswap_pools_count));
 339
 340		pool->zpools[i] = zpool_create_pool(type, name, gfp);
 341		if (!pool->zpools[i]) {
 342			pr_err("%s zpool not available\n", type);
 343			goto error;
 344		}
 345	}
 346	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
 
 347
 348	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 349
 350	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
 351	if (!pool->acomp_ctx) {
 352		pr_err("percpu alloc failed\n");
 353		goto error;
 354	}
 
 
 
 
 355
 356	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
 357				       &pool->node);
 358	if (ret)
 359		goto error;
 
 
 
 360
 361	/* being the current pool takes 1 ref; this func expects the
 362	 * caller to always add the new pool as the current pool
 363	 */
 364	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
 365			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
 366	if (ret)
 367		goto ref_fail;
 368	INIT_LIST_HEAD(&pool->list);
 
 
 
 
 369
 370	zswap_pool_debug("created", pool);
 
 
 
 
 371
 372	return pool;
 373
 374ref_fail:
 375	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 376error:
 377	if (pool->acomp_ctx)
 378		free_percpu(pool->acomp_ctx);
 379	while (i--)
 380		zpool_destroy_pool(pool->zpools[i]);
 381	kfree(pool);
 382	return NULL;
 383}
 384
 385static struct zswap_pool *__zswap_pool_create_fallback(void)
 
 
 386{
 387	bool has_comp, has_zpool;
 388
 389	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
 390	if (!has_comp && strcmp(zswap_compressor,
 391				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
 392		pr_err("compressor %s not available, using default %s\n",
 393		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
 394		param_free_charp(&zswap_compressor);
 395		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 396		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
 397	}
 398	if (!has_comp) {
 399		pr_err("default compressor %s not available\n",
 400		       zswap_compressor);
 401		param_free_charp(&zswap_compressor);
 402		zswap_compressor = ZSWAP_PARAM_UNSET;
 403	}
 404
 405	has_zpool = zpool_has_pool(zswap_zpool_type);
 406	if (!has_zpool && strcmp(zswap_zpool_type,
 407				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
 408		pr_err("zpool %s not available, using default %s\n",
 409		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
 410		param_free_charp(&zswap_zpool_type);
 411		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 412		has_zpool = zpool_has_pool(zswap_zpool_type);
 413	}
 414	if (!has_zpool) {
 415		pr_err("default zpool %s not available\n",
 416		       zswap_zpool_type);
 417		param_free_charp(&zswap_zpool_type);
 418		zswap_zpool_type = ZSWAP_PARAM_UNSET;
 419	}
 420
 421	if (!has_comp || !has_zpool)
 422		return NULL;
 
 423
 424	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 425}
 426
 427static void zswap_pool_destroy(struct zswap_pool *pool)
 
 428{
 429	int i;
 430
 431	zswap_pool_debug("destroying", pool);
 
 
 432
 433	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 434	free_percpu(pool->acomp_ctx);
 435
 436	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 437		zpool_destroy_pool(pool->zpools[i]);
 438	kfree(pool);
 
 
 439}
 440
 441static void __zswap_pool_release(struct work_struct *work)
 442{
 443	struct zswap_pool *pool = container_of(work, typeof(*pool),
 444						release_work);
 445
 446	synchronize_rcu();
 447
 448	/* nobody should have been able to get a ref... */
 449	WARN_ON(!percpu_ref_is_zero(&pool->ref));
 450	percpu_ref_exit(&pool->ref);
 
 451
 452	/* pool is now off zswap_pools list and has no references. */
 453	zswap_pool_destroy(pool);
 454}
 455
 456static struct zswap_pool *zswap_pool_current(void);
 457
 458static void __zswap_pool_empty(struct percpu_ref *ref)
 459{
 460	struct zswap_pool *pool;
 
 461
 462	pool = container_of(ref, typeof(*pool), ref);
 
 463
 464	spin_lock_bh(&zswap_pools_lock);
 
 
 465
 466	WARN_ON(pool == zswap_pool_current());
 
 467
 468	list_del_rcu(&pool->list);
 469
 470	INIT_WORK(&pool->release_work, __zswap_pool_release);
 471	schedule_work(&pool->release_work);
 472
 473	spin_unlock_bh(&zswap_pools_lock);
 
 474}
 475
 476static int __must_check zswap_pool_get(struct zswap_pool *pool)
 477{
 478	if (!pool)
 479		return 0;
 480
 481	return percpu_ref_tryget(&pool->ref);
 
 
 
 
 482}
 483
 484static void zswap_pool_put(struct zswap_pool *pool)
 485{
 486	percpu_ref_put(&pool->ref);
 487}
 488
 489static struct zswap_pool *__zswap_pool_current(void)
 490{
 491	struct zswap_pool *pool;
 492
 493	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 494	WARN_ONCE(!pool && zswap_has_pool,
 495		  "%s: no page storage pool!\n", __func__);
 496
 497	return pool;
 498}
 499
 500static struct zswap_pool *zswap_pool_current(void)
 501{
 502	assert_spin_locked(&zswap_pools_lock);
 503
 504	return __zswap_pool_current();
 505}
 506
 507static struct zswap_pool *zswap_pool_current_get(void)
 508{
 509	struct zswap_pool *pool;
 510
 511	rcu_read_lock();
 512
 513	pool = __zswap_pool_current();
 514	if (!zswap_pool_get(pool))
 515		pool = NULL;
 516
 517	rcu_read_unlock();
 518
 519	return pool;
 520}
 521
 522/* type and compressor must be null-terminated */
 523static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 524{
 525	struct zswap_pool *pool;
 526
 527	assert_spin_locked(&zswap_pools_lock);
 528
 529	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 530		if (strcmp(pool->tfm_name, compressor))
 531			continue;
 532		/* all zpools share the same type */
 533		if (strcmp(zpool_get_type(pool->zpools[0]), type))
 534			continue;
 535		/* if we can't get it, it's about to be destroyed */
 536		if (!zswap_pool_get(pool))
 537			continue;
 538		return pool;
 539	}
 540
 541	return NULL;
 542}
 543
 544/*********************************
 545* param callbacks
 546**********************************/
 547
 548static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
 549{
 550	/* no change required */
 551	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
 552		return false;
 553	return true;
 554}
 555
 556/* val must be a null-terminated string */
 557static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 558			     char *type, char *compressor)
 559{
 560	struct zswap_pool *pool, *put_pool = NULL;
 561	char *s = strstrip((char *)val);
 562	int ret = 0;
 563	bool new_pool = false;
 564
 565	mutex_lock(&zswap_init_lock);
 566	switch (zswap_init_state) {
 567	case ZSWAP_UNINIT:
 568		/* if this is load-time (pre-init) param setting,
 569		 * don't create a pool; that's done during init.
 570		 */
 571		ret = param_set_charp(s, kp);
 572		break;
 573	case ZSWAP_INIT_SUCCEED:
 574		new_pool = zswap_pool_changed(s, kp);
 575		break;
 576	case ZSWAP_INIT_FAILED:
 577		pr_err("can't set param, initialization failed\n");
 578		ret = -ENODEV;
 579	}
 580	mutex_unlock(&zswap_init_lock);
 581
 582	/* no need to create a new pool, return directly */
 583	if (!new_pool)
 584		return ret;
 585
 586	if (!type) {
 587		if (!zpool_has_pool(s)) {
 588			pr_err("zpool %s not available\n", s);
 589			return -ENOENT;
 590		}
 591		type = s;
 592	} else if (!compressor) {
 593		if (!crypto_has_acomp(s, 0, 0)) {
 594			pr_err("compressor %s not available\n", s);
 595			return -ENOENT;
 596		}
 597		compressor = s;
 598	} else {
 599		WARN_ON(1);
 600		return -EINVAL;
 601	}
 602
 603	spin_lock_bh(&zswap_pools_lock);
 604
 605	pool = zswap_pool_find_get(type, compressor);
 606	if (pool) {
 607		zswap_pool_debug("using existing", pool);
 608		WARN_ON(pool == zswap_pool_current());
 609		list_del_rcu(&pool->list);
 610	}
 611
 612	spin_unlock_bh(&zswap_pools_lock);
 613
 614	if (!pool)
 615		pool = zswap_pool_create(type, compressor);
 616	else {
 617		/*
 618		 * Restore the initial ref dropped by percpu_ref_kill()
 619		 * when the pool was decommissioned and switch it again
 620		 * to percpu mode.
 621		 */
 622		percpu_ref_resurrect(&pool->ref);
 623
 624		/* Drop the ref from zswap_pool_find_get(). */
 625		zswap_pool_put(pool);
 626	}
 627
 628	if (pool)
 629		ret = param_set_charp(s, kp);
 630	else
 631		ret = -EINVAL;
 632
 633	spin_lock_bh(&zswap_pools_lock);
 634
 635	if (!ret) {
 636		put_pool = zswap_pool_current();
 637		list_add_rcu(&pool->list, &zswap_pools);
 638		zswap_has_pool = true;
 639	} else if (pool) {
 640		/* add the possibly pre-existing pool to the end of the pools
 641		 * list; if it's new (and empty) then it'll be removed and
 642		 * destroyed by the put after we drop the lock
 643		 */
 644		list_add_tail_rcu(&pool->list, &zswap_pools);
 645		put_pool = pool;
 646	}
 647
 648	spin_unlock_bh(&zswap_pools_lock);
 649
 650	if (!zswap_has_pool && !pool) {
 651		/* if initial pool creation failed, and this pool creation also
 652		 * failed, maybe both compressor and zpool params were bad.
 653		 * Allow changing this param, so pool creation will succeed
 654		 * when the other param is changed. We already verified this
 655		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
 656		 * checks above.
 657		 */
 658		ret = param_set_charp(s, kp);
 659	}
 660
 661	/* drop the ref from either the old current pool,
 662	 * or the new pool we failed to add
 663	 */
 664	if (put_pool)
 665		percpu_ref_kill(&put_pool->ref);
 666
 667	return ret;
 668}
 669
 670static int zswap_compressor_param_set(const char *val,
 671				      const struct kernel_param *kp)
 672{
 673	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
 674}
 675
 676static int zswap_zpool_param_set(const char *val,
 677				 const struct kernel_param *kp)
 678{
 679	return __zswap_param_set(val, kp, NULL, zswap_compressor);
 680}
 681
 682static int zswap_enabled_param_set(const char *val,
 683				   const struct kernel_param *kp)
 684{
 685	int ret = -ENODEV;
 686
 687	/* if this is load-time (pre-init) param setting, only set param. */
 688	if (system_state != SYSTEM_RUNNING)
 689		return param_set_bool(val, kp);
 690
 691	mutex_lock(&zswap_init_lock);
 692	switch (zswap_init_state) {
 693	case ZSWAP_UNINIT:
 694		if (zswap_setup())
 695			break;
 696		fallthrough;
 697	case ZSWAP_INIT_SUCCEED:
 698		if (!zswap_has_pool)
 699			pr_err("can't enable, no pool configured\n");
 700		else
 701			ret = param_set_bool(val, kp);
 702		break;
 703	case ZSWAP_INIT_FAILED:
 704		pr_err("can't enable, initialization failed\n");
 705	}
 706	mutex_unlock(&zswap_init_lock);
 707
 708	return ret;
 709}
 710
 711/*********************************
 712* lru functions
 713**********************************/
 714
 715/* should be called under RCU */
 716#ifdef CONFIG_MEMCG
 717static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 718{
 719	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
 720}
 721#else
 722static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 723{
 724	return NULL;
 725}
 726#endif
 727
 728static inline int entry_to_nid(struct zswap_entry *entry)
 729{
 730	return page_to_nid(virt_to_page(entry));
 731}
 732
 733static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 734{
 735	atomic_long_t *nr_zswap_protected;
 736	unsigned long lru_size, old, new;
 737	int nid = entry_to_nid(entry);
 738	struct mem_cgroup *memcg;
 739	struct lruvec *lruvec;
 740
 741	/*
 742	 * Note that it is safe to use rcu_read_lock() here, even in the face of
 743	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
 744	 * used in list_lru lookup, only two scenarios are possible:
 745	 *
 746	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
 747	 *    new entry will be reparented to memcg's parent's list_lru.
 748	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
 749	 *    new entry will be added directly to memcg's parent's list_lru.
 750	 *
 751	 * Similar reasoning holds for list_lru_del().
 752	 */
 753	rcu_read_lock();
 754	memcg = mem_cgroup_from_entry(entry);
 755	/* will always succeed */
 756	list_lru_add(list_lru, &entry->lru, nid, memcg);
 757
 758	/* Update the protection area */
 759	lru_size = list_lru_count_one(list_lru, nid, memcg);
 760	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 761	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
 762	old = atomic_long_inc_return(nr_zswap_protected);
 763	/*
 764	 * Decay to avoid overflow and adapt to changing workloads.
 765	 * This is based on LRU reclaim cost decaying heuristics.
 766	 */
 767	do {
 768		new = old > lru_size / 4 ? old / 2 : old;
 769	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
 770	rcu_read_unlock();
 771}
 772
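/*
 * Illustrative aside (not part of zswap.c): the decay above is a plain
 * atomic read-modify-write retry loop -- halve nr_zswap_protected whenever
 * it exceeds a quarter of the LRU size.  A minimal, standalone userspace
 * sketch of the same pattern using C11 atomics; all names and numbers here
 * are hypothetical:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_protected;

/* Bump the protected count, then decay it if it exceeds lru_size / 4. */
static void protect_one(long lru_size)
{
	long old = atomic_fetch_add(&nr_protected, 1) + 1;
	long new;

	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_compare_exchange_weak(&nr_protected, &old, new));
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		protect_one(100);	/* pretend the LRU holds 100 entries */
	printf("protected: %ld\n", atomic_load(&nr_protected));
	return 0;
}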
 773static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
 774{
 775	int nid = entry_to_nid(entry);
 776	struct mem_cgroup *memcg;
 777
 778	rcu_read_lock();
 779	memcg = mem_cgroup_from_entry(entry);
 780	/* will always succeed */
 781	list_lru_del(list_lru, &entry->lru, nid, memcg);
 782	rcu_read_unlock();
 783}
 784
 785void zswap_lruvec_state_init(struct lruvec *lruvec)
 786{
 787	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
 788}
 789
 790void zswap_folio_swapin(struct folio *folio)
 791{
 792	struct lruvec *lruvec;
 793
 794	if (folio) {
 795		lruvec = folio_lruvec(folio);
 796		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 797	}
 798}
 799
 800void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 801{
 802	/* lock out zswap shrinker walking memcg tree */
 803	spin_lock(&zswap_shrink_lock);
 804	if (zswap_next_shrink == memcg)
 805		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
 806	spin_unlock(&zswap_shrink_lock);
 807}
 808
 809/*********************************
 810* rbtree functions
 811**********************************/
 812static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 813{
 814	struct rb_node *node = root->rb_node;
 815	struct zswap_entry *entry;
 816	pgoff_t entry_offset;
 817
 818	while (node) {
 819		entry = rb_entry(node, struct zswap_entry, rbnode);
 820		entry_offset = swp_offset(entry->swpentry);
 821		if (entry_offset > offset)
 822			node = node->rb_left;
 823		else if (entry_offset < offset)
 824			node = node->rb_right;
 825		else
 826			return entry;
 827	}
 828	return NULL;
 829}
 830
 831/*
 832 * In the case that an entry with the same offset is found, a pointer to
 833 * the existing entry is stored in dupentry and the function returns -EEXIST
 834 */
 835static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 836			struct zswap_entry **dupentry)
 837{
 838	struct rb_node **link = &root->rb_node, *parent = NULL;
 839	struct zswap_entry *myentry;
 840	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
 841
 842	while (*link) {
 843		parent = *link;
 844		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 845		myentry_offset = swp_offset(myentry->swpentry);
 846		if (myentry_offset > entry_offset)
 847			link = &(*link)->rb_left;
 848		else if (myentry_offset < entry_offset)
 849			link = &(*link)->rb_right;
 850		else {
 851			*dupentry = myentry;
 852			return -EEXIST;
 853		}
 854	}
 855	rb_link_node(&entry->rbnode, parent, link);
 856	rb_insert_color(&entry->rbnode, root);
 857	return 0;
 858}
 859
 860static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 861{
 862	rb_erase(&entry->rbnode, root);
 863	RB_CLEAR_NODE(&entry->rbnode);
 864}
 865
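/*
 * Illustrative aside (not part of zswap.c): the rbtree above is simply an
 * ordered map keyed by swap offset.  Below is a standalone, unbalanced
 * binary-search-tree analogue of the same search and duplicate-detecting
 * insert pattern; the toy_* names are hypothetical:
 */
#include <stdio.h>

struct toy_entry {
	unsigned long offset;			/* stand-in for swp_offset() */
	struct toy_entry *left, *right;
};

static struct toy_entry *toy_search(struct toy_entry *node, unsigned long offset)
{
	while (node) {
		if (node->offset > offset)
			node = node->left;
		else if (node->offset < offset)
			node = node->right;
		else
			return node;
	}
	return NULL;
}

/* Returns -1 and sets *dup when an entry with the same offset already exists. */
static int toy_insert(struct toy_entry **root, struct toy_entry *entry,
		      struct toy_entry **dup)
{
	struct toy_entry **link = root;

	while (*link) {
		if ((*link)->offset > entry->offset)
			link = &(*link)->left;
		else if ((*link)->offset < entry->offset)
			link = &(*link)->right;
		else {
			*dup = *link;
			return -1;
		}
	}
	*link = entry;
	return 0;
}

int main(void)
{
	struct toy_entry *root = NULL, *dup = NULL;
	struct toy_entry a = { .offset = 42 }, b = { .offset = 42 };

	toy_insert(&root, &a, &dup);
	printf("duplicate insert: %d\n", toy_insert(&root, &b, &dup));	/* -1 */
	printf("found offset: %lu\n", toy_search(root, 42)->offset);	/* 42 */
	return 0;
}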
 866/*********************************
 867* zswap entry functions
 868**********************************/
 869static struct kmem_cache *zswap_entry_cache;
 870
 871static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
 872{
 873	struct zswap_entry *entry;
 874	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
 875	if (!entry)
 876		return NULL;
 877	RB_CLEAR_NODE(&entry->rbnode);
 878	return entry;
 879}
 880
 881static void zswap_entry_cache_free(struct zswap_entry *entry)
 882{
 883	kmem_cache_free(zswap_entry_cache, entry);
 884}
 885
 886static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
 887{
 888	int i = 0;
 889
 890	if (ZSWAP_NR_ZPOOLS > 1)
 891		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
 892
 893	return entry->pool->zpools[i];
 894}
 895
 896/*
 897 * Carries out the common pattern of freeing an entry's zpool allocation,
 898 * freeing the entry itself, and decrementing the number of stored pages.
 899 */
 900static void zswap_entry_free(struct zswap_entry *entry)
 901{
 902	if (!entry->length)
 903		atomic_dec(&zswap_same_filled_pages);
 904	else {
 905		zswap_lru_del(&zswap_list_lru, entry);
 906		zpool_free(zswap_find_zpool(entry), entry->handle);
 907		atomic_dec(&zswap_nr_stored);
 908		zswap_pool_put(entry->pool);
 909	}
 910	if (entry->objcg) {
 911		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 912		obj_cgroup_put(entry->objcg);
 913	}
 914	zswap_entry_cache_free(entry);
 915	atomic_dec(&zswap_stored_pages);
 916	zswap_update_total_size();
 917}
 918
 919/*
 920 * The caller holds the tree lock and has looked the entry up in the tree,
 921 * so it must be on the tree; remove it from the tree and free it.
 922 */
 923static void zswap_invalidate_entry(struct zswap_tree *tree,
 924				   struct zswap_entry *entry)
 925{
 926	zswap_rb_erase(&tree->rbroot, entry);
 927	zswap_entry_free(entry);
 928}
 929
 930/*********************************
 931* compressed storage functions
 932**********************************/
 933static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 934{
 935	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 936	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 937	struct crypto_acomp *acomp;
 938	struct acomp_req *req;
 939	int ret;
 940
 941	mutex_init(&acomp_ctx->mutex);
 942
 943	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 944	if (!acomp_ctx->buffer)
 945		return -ENOMEM;
 946
 947	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
 948	if (IS_ERR(acomp)) {
 949		pr_err("could not alloc crypto acomp %s : %ld\n",
 950				pool->tfm_name, PTR_ERR(acomp));
 951		ret = PTR_ERR(acomp);
 952		goto acomp_fail;
 953	}
 954	acomp_ctx->acomp = acomp;
 955	acomp_ctx->is_sleepable = acomp_is_async(acomp);
 956
 957	req = acomp_request_alloc(acomp_ctx->acomp);
 958	if (!req) {
 959		pr_err("could not alloc crypto acomp_request %s\n",
 960		       pool->tfm_name);
 961		ret = -ENOMEM;
 962		goto req_fail;
 963	}
 964	acomp_ctx->req = req;
 965
 966	crypto_init_wait(&acomp_ctx->wait);
 967	/*
 968	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
 969	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
 970	 * won't be called, crypto_wait_req() will return without blocking.
 971	 */
 972	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 973				   crypto_req_done, &acomp_ctx->wait);
 974
 975	return 0;
 976
 977req_fail:
 978	crypto_free_acomp(acomp_ctx->acomp);
 979acomp_fail:
 980	kfree(acomp_ctx->buffer);
 981	return ret;
 982}
 983
 984static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 985{
 986	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 987	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 988
 989	if (!IS_ERR_OR_NULL(acomp_ctx)) {
 990		if (!IS_ERR_OR_NULL(acomp_ctx->req))
 991			acomp_request_free(acomp_ctx->req);
 992		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
 993			crypto_free_acomp(acomp_ctx->acomp);
 994		kfree(acomp_ctx->buffer);
 995	}
 996
 997	return 0;
 998}
 999
1000static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
1001{
1002	struct crypto_acomp_ctx *acomp_ctx;
1003	struct scatterlist input, output;
1004	int comp_ret = 0, alloc_ret = 0;
1005	unsigned int dlen = PAGE_SIZE;
1006	unsigned long handle;
1007	struct zpool *zpool;
1008	char *buf;
1009	gfp_t gfp;
1010	u8 *dst;
1011
1012	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1013
1014	mutex_lock(&acomp_ctx->mutex);
1015
1016	dst = acomp_ctx->buffer;
1017	sg_init_table(&input, 1);
1018	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1019
1020	/*
1021	 * We need PAGE_SIZE * 2 here since the compressed output may be larger
1022	 * than the input, and hardware accelerators may not check the dst buffer
1023	 * size, so give the dst buffer enough length to avoid a buffer overflow.
1024	 */
1025	sg_init_one(&output, dst, PAGE_SIZE * 2);
1026	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1027
1028	/*
1029	 * It may look a little silly that we send an asynchronous request and
1030	 * then wait for its completion synchronously; the process is effectively
1031	 * synchronous.
1032	 * Theoretically, acomp lets users submit multiple requests on one acomp
1033	 * instance and have them completed concurrently, but zswap stores and
1034	 * loads page by page, so a single thread doing zswap has no way to send
1035	 * the second page before the first one is done.
1036	 * However, different threads running on different CPUs use different
1037	 * acomp instances, so multiple threads can still (de)compress in
1038	 * parallel.
1039	 */
1040	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1041	dlen = acomp_ctx->req->dlen;
1042	if (comp_ret)
1043		goto unlock;
1044
1045	zpool = zswap_find_zpool(entry);
1046	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1047	if (zpool_malloc_support_movable(zpool))
1048		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1049	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
1050	if (alloc_ret)
1051		goto unlock;
1052
1053	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1054	memcpy(buf, dst, dlen);
1055	zpool_unmap_handle(zpool, handle);
1056
1057	entry->handle = handle;
1058	entry->length = dlen;
1059
1060unlock:
1061	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
1062		zswap_reject_compress_poor++;
1063	else if (comp_ret)
1064		zswap_reject_compress_fail++;
1065	else if (alloc_ret)
1066		zswap_reject_alloc_fail++;
1067
1068	mutex_unlock(&acomp_ctx->mutex);
1069	return comp_ret == 0 && alloc_ret == 0;
1070}
1071
1072static void zswap_decompress(struct zswap_entry *entry, struct page *page)
1073{
1074	struct zpool *zpool = zswap_find_zpool(entry);
1075	struct scatterlist input, output;
1076	struct crypto_acomp_ctx *acomp_ctx;
1077	u8 *src;
1078
1079	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1080	mutex_lock(&acomp_ctx->mutex);
1081
1082	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1083	/*
1084	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
1085	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
1086	 * resort to copying the buffer to a temporary one.
1087	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
1088	 * such as a kmap address of high memory or even a vmap address.
1089	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
1090	 * In such cases, we also must copy the buffer to a temporary, lowmem one.
1091	 */
1092	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
1093	    !virt_addr_valid(src)) {
1094		memcpy(acomp_ctx->buffer, src, entry->length);
1095		src = acomp_ctx->buffer;
1096		zpool_unmap_handle(zpool, entry->handle);
1097	}
1098
1099	sg_init_one(&input, src, entry->length);
1100	sg_init_table(&output, 1);
1101	sg_set_page(&output, page, PAGE_SIZE, 0);
1102	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1103	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1104	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1105	mutex_unlock(&acomp_ctx->mutex);
1106
1107	if (src != acomp_ctx->buffer)
1108		zpool_unmap_handle(zpool, entry->handle);
1109}
1110
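/*
 * Illustrative aside (not part of zswap.c): the mapping is copied into the
 * per-CPU bounce buffer above when the compressor may sleep while the zpool
 * mapping must not be held across a sleep, or when the mapping is not a
 * linearly mapped lowmem address that sg_init_one() can handle.  A
 * standalone restatement of that predicate (hypothetical names):
 */
#include <stdbool.h>
#include <stdio.h>

static bool toy_needs_bounce_copy(bool compressor_may_sleep,
				  bool mapping_survives_sleep,
				  bool mapping_is_linear_lowmem)
{
	return (compressor_may_sleep && !mapping_survives_sleep) ||
	       !mapping_is_linear_lowmem;
}

int main(void)
{
	/* e.g. an async (sleepable) compressor over an atomic zpool mapping */
	printf("bounce: %d\n", toy_needs_bounce_copy(true, false, true));	/* 1 */
	/* e.g. a sync compressor over a linear, sleepable mapping */
	printf("bounce: %d\n", toy_needs_bounce_copy(false, true, true));	/* 0 */
	return 0;
}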
1111/*********************************
1112* writeback code
1113**********************************/
1114/*
1115 * Attempts to free an entry by adding a folio to the swap cache,
1116 * decompressing the entry data into the folio, and issuing a
1117 * bio write to write the folio back to the swap device.
1118 *
1119 * This can be thought of as a "resumed writeback" of the folio
1120 * to the swap device.  We are basically resuming the same swap
1121 * writeback path that was intercepted with the zswap_store()
1122 * in the first place.  After the folio has been decompressed into
1123 * the swap cache, the compressed version stored by zswap can be
1124 * freed.
1125 */
1126static int zswap_writeback_entry(struct zswap_entry *entry,
1127				 swp_entry_t swpentry)
1128{
1129	struct zswap_tree *tree;
1130	struct folio *folio;
1131	struct mempolicy *mpol;
1132	bool folio_was_allocated;
1133	struct writeback_control wbc = {
1134		.sync_mode = WB_SYNC_NONE,
1135	};
1136
1137	/* try to allocate swap cache folio */
1138	mpol = get_task_policy(current);
1139	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1140				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1141	if (!folio)
1142		return -ENOMEM;
1143
1144	/*
1145	 * Found an existing folio: we raced with swapin or a concurrent
1146	 * shrinker. We generally write back cold folios from zswap, and
1147	 * swapin means the folio just became hot, so skip this folio.
1148	 * In the unlikely concurrent-shrinker case, it will be unlinked
1149	 * and freed when invalidated by the concurrent shrinker anyway.
1150	 */
1151	if (!folio_was_allocated) {
1152		folio_put(folio);
1153		return -EEXIST;
1154	}
1155
1156	/*
1157	 * folio is locked, and the swapcache is now secured against
1158	 * concurrent swapping to and from the slot, and concurrent
1159	 * swapoff so we can safely dereference the zswap tree here.
1160	 * Verify that the swap entry hasn't been invalidated and recycled
1161	 * behind our backs, to avoid overwriting a new swap folio with
1162	 * old compressed data. Only when this is successful can the entry
1163	 * be dereferenced.
1164	 */
1165	tree = swap_zswap_tree(swpentry);
1166	spin_lock(&tree->lock);
1167	if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
1168		spin_unlock(&tree->lock);
1169		delete_from_swap_cache(folio);
1170		folio_unlock(folio);
1171		folio_put(folio);
1172		return -ENOMEM;
1173	}
1174
1175	/* Safe to deref entry after the entry is verified above. */
1176	zswap_rb_erase(&tree->rbroot, entry);
1177	spin_unlock(&tree->lock);
1178
1179	zswap_decompress(entry, &folio->page);
1180
1181	count_vm_event(ZSWPWB);
1182	if (entry->objcg)
1183		count_objcg_event(entry->objcg, ZSWPWB);
1184
1185	zswap_entry_free(entry);
1186
1187	/* folio is up to date */
1188	folio_mark_uptodate(folio);
1189
1190	/* move it to the tail of the inactive list after end_writeback */
1191	folio_set_reclaim(folio);
1192
1193	/* start writeback */
1194	__swap_writepage(folio, &wbc);
1195	folio_put(folio);
1196
1197	return 0;
1198}
1199
1200/*********************************
1201* shrinker functions
1202**********************************/
1203static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1204				       spinlock_t *lock, void *arg)
1205{
1206	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1207	bool *encountered_page_in_swapcache = (bool *)arg;
1208	swp_entry_t swpentry;
1209	enum lru_status ret = LRU_REMOVED_RETRY;
1210	int writeback_result;
1211
1212	/*
1213	 * As soon as we drop the LRU lock, the entry can be freed by
1214	 * a concurrent invalidation. This means the following:
1215	 *
1216	 * 1. We extract the swp_entry_t to the stack, allowing
1217	 *    zswap_writeback_entry() to pin the swap entry and
1218	 *    then validate the zswap entry against that swap entry's
1219	 *    tree using pointer value comparison. Only when that
1220	 *    is successful can the entry be dereferenced.
1221	 *
1222	 * 2. Usually, objects are taken off the LRU for reclaim. In
1223	 *    this case this isn't possible, because if reclaim fails
1224	 *    for whatever reason, we have no means of knowing if the
1225	 *    entry is alive to put it back on the LRU.
1226	 *
1227	 *    So rotate it before dropping the lock. If the entry is
1228	 *    written back or invalidated, the free path will unlink
1229	 *    it. For failures, rotation is the right thing as well.
1230	 *
1231	 *    Temporary failures, where the same entry should be tried
1232	 *    again immediately, almost never happen for this shrinker.
1233	 *    We don't do any trylocking; -ENOMEM comes closest,
1234	 *    but that's extremely rare and doesn't happen spuriously
1235	 *    either. Don't bother distinguishing this case.
1236	 */
1237	list_move_tail(item, &l->list);
1238
1239	/*
1240	 * Once the lru lock is dropped, the entry might get freed. The
1241	 * swpentry is copied to the stack, and entry isn't deref'd again
1242	 * until the entry is verified to still be alive in the tree.
1243	 */
1244	swpentry = entry->swpentry;
1245
1246	/*
1247	 * It's safe to drop the lock here because we return either
1248	 * LRU_REMOVED_RETRY or LRU_RETRY.
1249	 */
1250	spin_unlock(lock);
1251
1252	writeback_result = zswap_writeback_entry(entry, swpentry);
1253
1254	if (writeback_result) {
1255		zswap_reject_reclaim_fail++;
1256		ret = LRU_RETRY;
1257
1258		/*
1259		 * Encountering a page already in swap cache is a sign that we are shrinking
1260		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1261		 * shrinker context).
1262		 */
1263		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1264			ret = LRU_STOP;
1265			*encountered_page_in_swapcache = true;
1266		}
1267	} else {
1268		zswap_written_back_pages++;
1269	}
1270
1271	spin_lock(lock);
1272	return ret;
1273}
1274
1275static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1276		struct shrink_control *sc)
1277{
1278	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1279	unsigned long shrink_ret, nr_protected, lru_size;
1280	bool encountered_page_in_swapcache = false;
1281
1282	if (!zswap_shrinker_enabled ||
1283			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1284		sc->nr_scanned = 0;
1285		return SHRINK_STOP;
1286	}
1287
1288	nr_protected =
1289		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1290	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
1291
1292	/*
1293	 * Abort if we are shrinking into the protected region.
1294	 *
1295	 * This short-circuiting is necessary because if we have too many
1296	 * concurrent reclaimers getting the freeable zswap object counts at the
1297	 * same time (before any of them made reasonable progress), the total
1298	 * number of reclaimed objects might be more than the number of unprotected
1299	 * objects (i.e the reclaimers will reclaim into the protected area of the
1300	 * zswap LRU).
1301	 */
1302	if (nr_protected >= lru_size - sc->nr_to_scan) {
1303		sc->nr_scanned = 0;
1304		return SHRINK_STOP;
1305	}
1306
1307	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1308		&encountered_page_in_swapcache);
1309
1310	if (encountered_page_in_swapcache)
1311		return SHRINK_STOP;
1312
1313	return shrink_ret ? shrink_ret : SHRINK_STOP;
1314}
1315
1316static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1317		struct shrink_control *sc)
1318{
1319	struct mem_cgroup *memcg = sc->memcg;
1320	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1321	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1322
1323	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1324		return 0;
1325
1326	/*
1327	 * The shrinker resumes swap writeback, which will enter block
1328	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1329	 * rules (may_enter_fs()), which apply on a per-folio basis.
1330	 */
1331	if (!gfp_has_io_fs(sc->gfp_mask))
1332		return 0;
1333
1334	/*
1335	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1336	 * have them per-node and thus per-lruvec. Careful if memcg is
1337	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1338	 * for the lruvec, but not for memcg_page_state().
1339	 *
1340	 * Without memcg, use the zswap pool-wide metrics.
1341	 */
1342	if (!mem_cgroup_disabled()) {
1343		mem_cgroup_flush_stats(memcg);
1344		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1345		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1346	} else {
1347		nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
1348		nr_stored = atomic_read(&zswap_nr_stored);
1349	}
1350
1351	if (!nr_stored)
1352		return 0;
1353
1354	nr_protected =
1355		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1356	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1357	/*
1358	 * Subtract the lru size by an estimate of the number of pages
1359	 * that should be protected.
1360	 */
1361	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1362
1363	/*
1364	 * Scale the number of freeable pages by the memory saving factor.
1365	 * This ensures that the better zswap compresses memory, the fewer
1366	 * pages we will evict to swap (as it will otherwise incur IO for
1367	 * relatively small memory saving).
1368	 */
1369	return mult_frac(nr_freeable, nr_backing, nr_stored);
1370}
1371
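/*
 * Illustrative aside (not part of zswap.c): the count above scales the
 * freeable object count by the pool's compression ratio,
 * nr_freeable * nr_backing / nr_stored.  A standalone sketch with made-up
 * numbers (roughly 3:1 compression) shows the effect:
 */
#include <stdio.h>

/* Simplified stand-in for the kernel's mult_frac() helper. */
static unsigned long toy_scale_freeable(unsigned long nr_freeable,
					unsigned long nr_backing,
					unsigned long nr_stored)
{
	return nr_freeable * nr_backing / nr_stored;
}

int main(void)
{
	/* 3000 stored pages compressed into 1000 pages of backing memory. */
	unsigned long nr_stored = 3000, nr_backing = 1000, nr_freeable = 900;

	/* Reports ~300: the better zswap compresses, the less it writes back. */
	printf("reported freeable: %lu\n",
	       toy_scale_freeable(nr_freeable, nr_backing, nr_stored));
	return 0;
}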
1372static struct shrinker *zswap_alloc_shrinker(void)
1373{
1374	struct shrinker *shrinker;
1375
1376	shrinker =
1377		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1378	if (!shrinker)
1379		return NULL;
1380
1381	shrinker->scan_objects = zswap_shrinker_scan;
1382	shrinker->count_objects = zswap_shrinker_count;
1383	shrinker->batch = 0;
1384	shrinker->seeks = DEFAULT_SEEKS;
1385	return shrinker;
1386}
1387
1388static int shrink_memcg(struct mem_cgroup *memcg)
1389{
1390	int nid, shrunk = 0;
1391
1392	if (!mem_cgroup_zswap_writeback_enabled(memcg))
1393		return -EINVAL;
1394
1395	/*
1396	 * Skip zombies because their LRUs are reparented and we would be
1397	 * reclaiming from the parent instead of the dead memcg.
1398	 */
1399	if (memcg && !mem_cgroup_online(memcg))
1400		return -ENOENT;
1401
1402	for_each_node_state(nid, N_NORMAL_MEMORY) {
1403		unsigned long nr_to_walk = 1;
1404
1405		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1406					    &shrink_memcg_cb, NULL, &nr_to_walk);
1407	}
1408	return shrunk ? 0 : -EAGAIN;
1409}
1410
1411static void shrink_worker(struct work_struct *w)
1412{
1413	struct mem_cgroup *memcg;
1414	int ret, failures = 0;
1415
1416	/* global reclaim will select cgroup in a round-robin fashion. */
1417	do {
1418		spin_lock(&zswap_shrink_lock);
1419		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1420		memcg = zswap_next_shrink;
1421
1422		/*
1423		 * We need to retry if we have gone through a full round trip, or if we
1424		 * got an offline memcg (or else we risk undoing the effect of the
1425		 * zswap memcg offlining cleanup callback). This is not catastrophic
1426		 * per se, but it will keep the now offlined memcg hostage for a while.
1427		 *
1428		 * Note that if we got an online memcg, we will keep the extra
1429		 * reference in case the original reference obtained by mem_cgroup_iter
1430		 * is dropped by the zswap memcg offlining callback, ensuring that the
1431		 * memcg is not killed when we are reclaiming.
1432		 */
1433		if (!memcg) {
1434			spin_unlock(&zswap_shrink_lock);
1435			if (++failures == MAX_RECLAIM_RETRIES)
1436				break;
1437
1438			goto resched;
1439		}
1440
1441		if (!mem_cgroup_tryget_online(memcg)) {
1442			/* drop the reference from mem_cgroup_iter() */
1443			mem_cgroup_iter_break(NULL, memcg);
1444			zswap_next_shrink = NULL;
1445			spin_unlock(&zswap_shrink_lock);
1446
1447			if (++failures == MAX_RECLAIM_RETRIES)
1448				break;
1449
1450			goto resched;
1451		}
1452		spin_unlock(&zswap_shrink_lock);
1453
1454		ret = shrink_memcg(memcg);
1455		/* drop the extra reference */
1456		mem_cgroup_put(memcg);
1457
1458		if (ret == -EINVAL)
1459			break;
1460		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1461			break;
1462
1463resched:
1464		cond_resched();
1465	} while (!zswap_can_accept());
1466}
1467
1468static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1469{
1470	unsigned long *page;
1471	unsigned long val;
1472	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1473
1474	page = (unsigned long *)ptr;
1475	val = page[0];
1476
1477	if (val != page[last_pos])
1478		return 0;
1479
1480	for (pos = 1; pos < last_pos; pos++) {
1481		if (val != page[pos])
1482			return 0;
1483	}
1484
1485	*value = val;
1486
1487	return 1;
1488}
1489
1490static void zswap_fill_page(void *ptr, unsigned long value)
1491{
1492	unsigned long *page;
1493
1494	page = (unsigned long *)ptr;
1495	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1496}
1497
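/*
 * Illustrative aside (not part of zswap.c): a "same-filled" page is one in
 * which every machine word equals the first word; such pages are stored as a
 * single value and later reconstructed by refilling the page.  A
 * self-contained userspace version of the same idea (hypothetical toy_*
 * names, 4 KiB pages assumed):
 */
#include <stdio.h>

#define TOY_PAGE_SIZE	4096UL
#define TOY_WORDS	(TOY_PAGE_SIZE / sizeof(unsigned long))

static int toy_is_same_filled(const unsigned long *page, unsigned long *value)
{
	unsigned long val = page[0];

	/* Check the last word first, then the middle, as zswap does above. */
	if (val != page[TOY_WORDS - 1])
		return 0;
	for (unsigned long pos = 1; pos < TOY_WORDS - 1; pos++)
		if (val != page[pos])
			return 0;
	*value = val;
	return 1;
}

static void toy_fill_page(unsigned long *page, unsigned long value)
{
	for (unsigned long pos = 0; pos < TOY_WORDS; pos++)
		page[pos] = value;
}

int main(void)
{
	unsigned long page[TOY_WORDS], value;

	toy_fill_page(page, 0xdeadbeefUL);
	if (toy_is_same_filled(page, &value))
		printf("same-filled with %#lx, no compression needed\n", value);
	return 0;
}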
1498bool zswap_store(struct folio *folio)
1499{
1500	swp_entry_t swp = folio->swap;
1501	pgoff_t offset = swp_offset(swp);
1502	struct zswap_tree *tree = swap_zswap_tree(swp);
1503	struct zswap_entry *entry, *dupentry;
1504	struct obj_cgroup *objcg = NULL;
1505	struct mem_cgroup *memcg = NULL;
1506
1507	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1508	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1509
1510	/* Large folios aren't supported */
1511	if (folio_test_large(folio))
1512		return false;
1513
1514	if (!zswap_enabled)
1515		goto check_old;
1516
1517	objcg = get_obj_cgroup_from_folio(folio);
1518	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1519		memcg = get_mem_cgroup_from_objcg(objcg);
1520		if (shrink_memcg(memcg)) {
1521			mem_cgroup_put(memcg);
1522			goto reject;
1523		}
1524		mem_cgroup_put(memcg);
1525	}
1526
1527	/* reclaim space if needed */
1528	if (zswap_is_full()) {
1529		zswap_pool_limit_hit++;
1530		zswap_pool_reached_full = true;
1531		goto shrink;
1532	}
1533
1534	if (zswap_pool_reached_full) {
1535		if (!zswap_can_accept())
1536			goto shrink;
1537		else
1538			zswap_pool_reached_full = false;
1539	}
1540
1541	/* allocate entry */
1542	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
1543	if (!entry) {
1544		zswap_reject_kmemcache_fail++;
1545		goto reject;
1546	}
1547
1548	if (zswap_same_filled_pages_enabled) {
1549		unsigned long value;
1550		u8 *src;
1551
1552		src = kmap_local_folio(folio, 0);
1553		if (zswap_is_page_same_filled(src, &value)) {
1554			kunmap_local(src);
1555			entry->length = 0;
1556			entry->value = value;
1557			atomic_inc(&zswap_same_filled_pages);
1558			goto insert_entry;
1559		}
1560		kunmap_local(src);
1561	}
1562
1563	if (!zswap_non_same_filled_pages_enabled)
1564		goto freepage;
1565
1566	/* if entry is successfully added, it keeps the reference */
1567	entry->pool = zswap_pool_current_get();
1568	if (!entry->pool)
1569		goto freepage;
1570
1571	if (objcg) {
1572		memcg = get_mem_cgroup_from_objcg(objcg);
1573		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1574			mem_cgroup_put(memcg);
1575			goto put_pool;
1576		}
1577		mem_cgroup_put(memcg);
1578	}
1579
1580	if (!zswap_compress(folio, entry))
1581		goto put_pool;
1582
1583insert_entry:
1584	entry->swpentry = swp;
1585	entry->objcg = objcg;
1586	if (objcg) {
1587		obj_cgroup_charge_zswap(objcg, entry->length);
1588		/* Account before objcg ref is moved to tree */
1589		count_objcg_event(objcg, ZSWPOUT);
1590	}
1591
1592	/* map */
1593	spin_lock(&tree->lock);
1594	/*
1595	 * The folio may have been dirtied again, invalidate the
1596	 * possibly stale entry before inserting the new entry.
1597	 */
1598	if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1599		zswap_invalidate_entry(tree, dupentry);
1600		WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
1601	}
1602	if (entry->length) {
1603		INIT_LIST_HEAD(&entry->lru);
1604		zswap_lru_add(&zswap_list_lru, entry);
1605		atomic_inc(&zswap_nr_stored);
1606	}
1607	spin_unlock(&tree->lock);
1608
1609	/* update stats */
1610	atomic_inc(&zswap_stored_pages);
1611	zswap_update_total_size();
1612	count_vm_event(ZSWPOUT);
1613
1614	return true;
1615
1616put_pool:
1617	zswap_pool_put(entry->pool);
1618freepage:
1619	zswap_entry_cache_free(entry);
1620reject:
1621	if (objcg)
1622		obj_cgroup_put(objcg);
1623check_old:
1624	/*
1625	 * If the zswap store fails or zswap is disabled, we must invalidate the
1626	 * possibly stale entry which was previously stored at this offset.
1627	 * Otherwise, writeback could overwrite the new data in the swapfile.
1628	 */
1629	spin_lock(&tree->lock);
1630	entry = zswap_rb_search(&tree->rbroot, offset);
1631	if (entry)
1632		zswap_invalidate_entry(tree, entry);
1633	spin_unlock(&tree->lock);
1634	return false;
1635
1636shrink:
1637	queue_work(shrink_wq, &zswap_shrink_work);
1638	goto reject;
1639}
1640
1641bool zswap_load(struct folio *folio)
1642{
1643	swp_entry_t swp = folio->swap;
1644	pgoff_t offset = swp_offset(swp);
1645	struct page *page = &folio->page;
1646	bool swapcache = folio_test_swapcache(folio);
1647	struct zswap_tree *tree = swap_zswap_tree(swp);
1648	struct zswap_entry *entry;
1649	u8 *dst;
1650
1651	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1652
1653	spin_lock(&tree->lock);
1654	entry = zswap_rb_search(&tree->rbroot, offset);
1655	if (!entry) {
1656		spin_unlock(&tree->lock);
1657		return false;
1658	}
1659	/*
1660	 * When reading into the swapcache, invalidate our entry. The
1661	 * swapcache can be the authoritative owner of the page and
1662	 * its mappings, and the pressure that results from having two
1663	 * in-memory copies outweighs any benefits of caching the
1664	 * compression work.
1665	 *
1666	 * (Most swapins go through the swapcache. The notable
1667	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1668	 * files, which reads into a private page and may free it if
1669	 * the fault fails. We remain the primary owner of the entry.)
1670	 */
1671	if (swapcache)
1672		zswap_rb_erase(&tree->rbroot, entry);
1673	spin_unlock(&tree->lock);
1674
1675	if (entry->length)
1676		zswap_decompress(entry, page);
1677	else {
1678		dst = kmap_local_page(page);
1679		zswap_fill_page(dst, entry->value);
1680		kunmap_local(dst);
1681	}
1682
1683	count_vm_event(ZSWPIN);
1684	if (entry->objcg)
1685		count_objcg_event(entry->objcg, ZSWPIN);
1686
1687	if (swapcache) {
1688		zswap_entry_free(entry);
1689		folio_mark_dirty(folio);
1690	}
1691
1692	return true;
1693}
1694
1695void zswap_invalidate(swp_entry_t swp)
1696{
1697	pgoff_t offset = swp_offset(swp);
1698	struct zswap_tree *tree = swap_zswap_tree(swp);
1699	struct zswap_entry *entry;
1700
1701	spin_lock(&tree->lock);
1702	entry = zswap_rb_search(&tree->rbroot, offset);
1703	if (entry)
1704		zswap_invalidate_entry(tree, entry);
1705	spin_unlock(&tree->lock);
1706}
1707
1708int zswap_swapon(int type, unsigned long nr_pages)
1709{
1710	struct zswap_tree *trees, *tree;
1711	unsigned int nr, i;
1712
1713	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1714	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1715	if (!trees) {
1716		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1717		return -ENOMEM;
1718	}
1719
1720	for (i = 0; i < nr; i++) {
1721		tree = trees + i;
1722		tree->rbroot = RB_ROOT;
1723		spin_lock_init(&tree->lock);
1724	}
1725
1726	nr_zswap_trees[type] = nr;
1727	zswap_trees[type] = trees;
1728	return 0;
1729}
1730
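/*
 * Illustrative aside (not part of zswap.c): one tree and one lock are
 * allocated per SWAP_ADDRESS_SPACE_PAGES worth of swap slots, spreading lock
 * contention across the device.  A standalone computation, assuming the
 * common SWAP_ADDRESS_SPACE_PAGES value of 16384 (64 MiB of 4 KiB pages):
 */
#include <stdio.h>

#define TOY_SWAP_ADDRESS_SPACE_PAGES	16384UL
#define TOY_DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nr_pages = 2097152UL;	/* an 8 GiB swap device */
	unsigned long nr_trees =
		TOY_DIV_ROUND_UP(nr_pages, TOY_SWAP_ADDRESS_SPACE_PAGES);

	printf("%lu swap slots -> %lu zswap trees\n", nr_pages, nr_trees);
	return 0;
}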
1731void zswap_swapoff(int type)
1732{
1733	struct zswap_tree *trees = zswap_trees[type];
1734	unsigned int i;
1735
1736	if (!trees)
1737		return;
1738
1739	/* try_to_unuse() invalidated all the entries already */
1740	for (i = 0; i < nr_zswap_trees[type]; i++)
1741		WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
1742
1743	kvfree(trees);
1744	nr_zswap_trees[type] = 0;
1745	zswap_trees[type] = NULL;
1746}
1747
1748/*********************************
1749* debugfs functions
1750**********************************/
1751#ifdef CONFIG_DEBUG_FS
1752#include <linux/debugfs.h>
1753
1754static struct dentry *zswap_debugfs_root;
1755
1756static int zswap_debugfs_init(void)
1757{
1758	if (!debugfs_initialized())
1759		return -ENODEV;
1760
1761	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1762
1763	debugfs_create_u64("pool_limit_hit", 0444,
1764			   zswap_debugfs_root, &zswap_pool_limit_hit);
1765	debugfs_create_u64("reject_reclaim_fail", 0444,
1766			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1767	debugfs_create_u64("reject_alloc_fail", 0444,
1768			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1769	debugfs_create_u64("reject_kmemcache_fail", 0444,
1770			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1771	debugfs_create_u64("reject_compress_fail", 0444,
1772			   zswap_debugfs_root, &zswap_reject_compress_fail);
1773	debugfs_create_u64("reject_compress_poor", 0444,
1774			   zswap_debugfs_root, &zswap_reject_compress_poor);
1775	debugfs_create_u64("written_back_pages", 0444,
1776			   zswap_debugfs_root, &zswap_written_back_pages);
1777	debugfs_create_u64("pool_total_size", 0444,
1778			   zswap_debugfs_root, &zswap_pool_total_size);
1779	debugfs_create_atomic_t("stored_pages", 0444,
1780				zswap_debugfs_root, &zswap_stored_pages);
1781	debugfs_create_atomic_t("same_filled_pages", 0444,
1782				zswap_debugfs_root, &zswap_same_filled_pages);
1783
1784	return 0;
1785}
1786#else
1787static int zswap_debugfs_init(void)
1788{
1789	return 0;
1790}
1791#endif
1792
1793/*********************************
1794* module init and exit
1795**********************************/
1796static int zswap_setup(void)
1797{
1798	struct zswap_pool *pool;
1799	int ret;
1800
1801	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1802	if (!zswap_entry_cache) {
1803		pr_err("entry cache creation failed\n");
1804		goto cache_fail;
1805	}
1806
1807	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1808				      "mm/zswap_pool:prepare",
1809				      zswap_cpu_comp_prepare,
1810				      zswap_cpu_comp_dead);
1811	if (ret)
1812		goto hp_fail;
1813
1814	shrink_wq = alloc_workqueue("zswap-shrink",
1815			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1816	if (!shrink_wq)
1817		goto shrink_wq_fail;
1818
1819	zswap_shrinker = zswap_alloc_shrinker();
1820	if (!zswap_shrinker)
1821		goto shrinker_fail;
1822	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1823		goto lru_fail;
1824	shrinker_register(zswap_shrinker);
1825
1826	INIT_WORK(&zswap_shrink_work, shrink_worker);
1827
1828	pool = __zswap_pool_create_fallback();
1829	if (pool) {
1830		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1831			zpool_get_type(pool->zpools[0]));
1832		list_add(&pool->list, &zswap_pools);
1833		zswap_has_pool = true;
1834	} else {
1835		pr_err("pool creation failed\n");
1836		zswap_enabled = false;
1837	}
1838
1839	if (zswap_debugfs_init())
1840		pr_warn("debugfs initialization failed\n");
1841	zswap_init_state = ZSWAP_INIT_SUCCEED;
1842	return 0;
1843
1844lru_fail:
1845	shrinker_free(zswap_shrinker);
1846shrinker_fail:
1847	destroy_workqueue(shrink_wq);
1848shrink_wq_fail:
1849	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1850hp_fail:
1851	kmem_cache_destroy(zswap_entry_cache);
1852cache_fail:
1853	/* if built-in, we aren't unloaded on failure; don't allow use */
1854	zswap_init_state = ZSWAP_INIT_FAILED;
1855	zswap_enabled = false;
1856	return -ENOMEM;
1857}
1858
1859static int __init zswap_init(void)
1860{
1861	if (!zswap_enabled)
1862		return 0;
1863	return zswap_setup();
1864}
1865/* must be late so crypto has time to come up */
1866late_initcall(zswap_init);
1867
1868MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1869MODULE_DESCRIPTION("Compressed cache for swap pages");