v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * zswap.c - zswap driver file
   4 *
   5 * zswap is a backend for frontswap that takes pages that are in the process
   6 * of being swapped out and attempts to compress and store them in a
   7 * RAM-based memory pool.  This can result in a significant I/O reduction on
   8 * the swap device and, in the case where decompressing from RAM is faster
   9 * than reading from the swap device, can also improve workload performance.
  10 *
  11 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  12*/
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/cpu.h>
  18#include <linux/highmem.h>
  19#include <linux/slab.h>
  20#include <linux/spinlock.h>
  21#include <linux/types.h>
  22#include <linux/atomic.h>
  23#include <linux/frontswap.h>
  24#include <linux/rbtree.h>
  25#include <linux/swap.h>
  26#include <linux/crypto.h>
  27#include <linux/scatterlist.h>
  28#include <linux/mempool.h>
  29#include <linux/zpool.h>
  30#include <crypto/acompress.h>
  31
  32#include <linux/mm_types.h>
  33#include <linux/page-flags.h>
  34#include <linux/swapops.h>
  35#include <linux/writeback.h>
  36#include <linux/pagemap.h>
  37#include <linux/workqueue.h>
  38
  39#include "swap.h"
  40
  41/*********************************
  42* statistics
  43**********************************/
  44/* Total bytes used by the compressed storage */
  45u64 zswap_pool_total_size;
  46/* The number of compressed pages currently stored in zswap */
  47atomic_t zswap_stored_pages = ATOMIC_INIT(0);
  48/* The number of same-value filled pages currently stored in zswap */
  49static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
  50
  51/*
  52 * The statistics below are not protected from concurrent access for
  53 * performance reasons so they may not be 100% accurate.  However,
  54 * they do provide useful information on roughly how many times a
  55 * certain event is occurring.
  56*/
  57
  58/* Pool limit was hit (see zswap_max_pool_percent) */
  59static u64 zswap_pool_limit_hit;
  60/* Pages written back when pool limit was reached */
  61static u64 zswap_written_back_pages;
  62/* Store failed due to a reclaim failure after pool limit was reached */
  63static u64 zswap_reject_reclaim_fail;
  64/* Compressed page was too big for the allocator to (optimally) store */
  65static u64 zswap_reject_compress_poor;
  66/* Store failed because underlying allocator could not get memory */
  67static u64 zswap_reject_alloc_fail;
  68/* Store failed because the entry metadata could not be allocated (rare) */
  69static u64 zswap_reject_kmemcache_fail;
  70/* Duplicate store was encountered (rare) */
  71static u64 zswap_duplicate_entry;
  72
  73/* Shrinker work queue */
  74static struct workqueue_struct *shrink_wq;
  75/* Pool limit was hit, we need to calm down */
  76static bool zswap_pool_reached_full;
  77
  78/*********************************
  79* tunables
  80**********************************/
  81
  82#define ZSWAP_PARAM_UNSET ""
  83
  84/* Enable/disable zswap */
  85static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
  86static int zswap_enabled_param_set(const char *,
  87				   const struct kernel_param *);
  88static const struct kernel_param_ops zswap_enabled_param_ops = {
  89	.set =		zswap_enabled_param_set,
  90	.get =		param_get_bool,
  91};
  92module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
  93
  94/* Crypto compressor to use */
  95static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
  96static int zswap_compressor_param_set(const char *,
  97				      const struct kernel_param *);
  98static const struct kernel_param_ops zswap_compressor_param_ops = {
  99	.set =		zswap_compressor_param_set,
 100	.get =		param_get_charp,
 101	.free =		param_free_charp,
 102};
 103module_param_cb(compressor, &zswap_compressor_param_ops,
 104		&zswap_compressor, 0644);
 105
 106/* Compressed storage zpool to use */
 107static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 108static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 109static const struct kernel_param_ops zswap_zpool_param_ops = {
 110	.set =		zswap_zpool_param_set,
 111	.get =		param_get_charp,
 112	.free =		param_free_charp,
 113};
 114module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 115
 116/* The maximum percentage of memory that the compressed pool can occupy */
 117static unsigned int zswap_max_pool_percent = 20;
 118module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 119
 120/* The threshold for accepting new pages after the max_pool_percent was hit */
 121static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
 122module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
 123		   uint, 0644);
 124
 125/*
 126 * Enable/disable handling same-value filled pages (enabled by default).
 127 * If disabled every page is considered non-same-value filled.
 128 */
 129static bool zswap_same_filled_pages_enabled = true;
 130module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
 131		   bool, 0644);
 132
 133/* Enable/disable handling non-same-value filled pages (enabled by default) */
 134static bool zswap_non_same_filled_pages_enabled = true;
 135module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
 136		   bool, 0644);
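    /*
     * Runtime usage sketch for the tunables above (illustrative values;
     * assumes the named compressor and zpool backends are built in or
     * already loaded):
     *
     *	echo 1   > /sys/module/zswap/parameters/enabled
     *	echo lz4 > /sys/module/zswap/parameters/compressor
     *	echo 25  > /sys/module/zswap/parameters/max_pool_percent
     */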
 137
 138/*********************************
 139* data structures
 140**********************************/
 141
 142struct crypto_acomp_ctx {
 143	struct crypto_acomp *acomp;
 144	struct acomp_req *req;
 145	struct crypto_wait wait;
 146	u8 *dstmem;
 147	struct mutex *mutex;
 148};
 149
 150struct zswap_pool {
 151	struct zpool *zpool;
 152	struct crypto_acomp_ctx __percpu *acomp_ctx;
 153	struct kref kref;
 154	struct list_head list;
 155	struct work_struct release_work;
 156	struct work_struct shrink_work;
 157	struct hlist_node node;
 158	char tfm_name[CRYPTO_MAX_ALG_NAME];
 159};
 160
 161/*
 162 * struct zswap_entry
 163 *
 164 * This structure contains the metadata for tracking a single compressed
 165 * page within zswap.
 166 *
 167 * rbnode - links the entry into red-black tree for the appropriate swap type
 168 * offset - the swap offset for the entry.  Index into the red-black tree.
 169 * refcount - the number of outstanding references to the entry. This is
 170 *            needed to protect against premature freeing of the entry by
 171 *            concurrent calls to load, invalidate, and writeback.  The lock
 172 *            for the zswap_tree structure that contains the entry must
 173 *            be held while changing the refcount.  Since the lock must
 174 *            be held, there is no reason to also make refcount atomic.
 175 * length - the length in bytes of the compressed page data.  Needed during
 176 *          decompression. For a same-value filled page, length is 0.
 177 * pool - the zswap_pool the entry's data is in
 178 * handle - zpool allocation handle that stores the compressed page data
 179 * value - the word value that fills a same-value filled page
 180 */
 181struct zswap_entry {
 182	struct rb_node rbnode;
 183	pgoff_t offset;
 184	int refcount;
 185	unsigned int length;
 186	struct zswap_pool *pool;
 187	union {
 188		unsigned long handle;
 189		unsigned long value;
 190	};
 191	struct obj_cgroup *objcg;
 192};
 193
 194struct zswap_header {
 195	swp_entry_t swpentry;
 196};
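    /*
     * The header above is prepended to the compressed payload only when
     * the zpool backend is evictable (see the hlen computation in
     * zswap_frontswap_store() below); zswap_writeback_entry() maps the
     * handle and reads swpentry back to locate the entry in its tree.
     */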
 197
 198/*
 199 * The tree lock in the zswap_tree struct protects a few things:
 200 * - the rbtree
 201 * - the refcount field of each entry in the tree
 202 */
 203struct zswap_tree {
 204	struct rb_root rbroot;
 205	spinlock_t lock;
 206};
 207
 208static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 209
 210/* RCU-protected iteration */
 211static LIST_HEAD(zswap_pools);
 212/* protects zswap_pools list modification */
 213static DEFINE_SPINLOCK(zswap_pools_lock);
 214/* pool counter to provide unique names to zpool */
 215static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 216
 217/* used by param callback function */
 218static bool zswap_init_started;
 219
 220/* fatal error during init */
 221static bool zswap_init_failed;
 222
 223/* init completed, but couldn't create the initial pool */
 224static bool zswap_has_pool;
 225
 226/*********************************
 227* helpers and fwd declarations
 228**********************************/
 229
 230#define zswap_pool_debug(msg, p)				\
 231	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 232		 zpool_get_type((p)->zpool))
 233
 234static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
 235static int zswap_pool_get(struct zswap_pool *pool);
 236static void zswap_pool_put(struct zswap_pool *pool);
 237
 238static const struct zpool_ops zswap_zpool_ops = {
 239	.evict = zswap_writeback_entry
 240};
 241
 242static bool zswap_is_full(void)
 243{
 244	return totalram_pages() * zswap_max_pool_percent / 100 <
 245			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 246}
 247
 248static bool zswap_can_accept(void)
 249{
 250	return totalram_pages() * zswap_accept_thr_percent / 100 *
 251				zswap_max_pool_percent / 100 >
 252			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 253}
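    /*
     * Worked example (illustrative numbers): with 4 GiB of RAM
     * (1048576 4K pages), max_pool_percent = 20 and
     * accept_threshold_percent = 90:
     *
     *	zswap_is_full() is true above    1048576 * 20/100          = 209715 pages
     *	zswap_can_accept() is true below 1048576 * 90/100 * 20/100 = 188743 pages
     *
     * so once the limit is hit, new stores are rejected until the pool
     * shrinks back under ~90% of its cap.
     */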
 254
 255static void zswap_update_total_size(void)
 256{
 257	struct zswap_pool *pool;
 258	u64 total = 0;
 259
 260	rcu_read_lock();
 261
 262	list_for_each_entry_rcu(pool, &zswap_pools, list)
 263		total += zpool_get_total_size(pool->zpool);
 264
 265	rcu_read_unlock();
 266
 267	zswap_pool_total_size = total;
 268}
 269
 270/*********************************
 271* zswap entry functions
 272**********************************/
 273static struct kmem_cache *zswap_entry_cache;
 274
 275static int __init zswap_entry_cache_create(void)
 276{
 277	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
 278	return zswap_entry_cache == NULL;
 279}
 280
 281static void __init zswap_entry_cache_destroy(void)
 282{
 283	kmem_cache_destroy(zswap_entry_cache);
 284}
 285
 286static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
 287{
 288	struct zswap_entry *entry;
 289	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
 290	if (!entry)
 291		return NULL;
 292	entry->refcount = 1;
 293	RB_CLEAR_NODE(&entry->rbnode);
 294	return entry;
 295}
 296
 297static void zswap_entry_cache_free(struct zswap_entry *entry)
 298{
 299	kmem_cache_free(zswap_entry_cache, entry);
 300}
 301
 302/*********************************
 303* rbtree functions
 304**********************************/
 305static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 306{
 307	struct rb_node *node = root->rb_node;
 308	struct zswap_entry *entry;
 309
 310	while (node) {
 311		entry = rb_entry(node, struct zswap_entry, rbnode);
 312		if (entry->offset > offset)
 313			node = node->rb_left;
 314		else if (entry->offset < offset)
 315			node = node->rb_right;
 316		else
 317			return entry;
 318	}
 319	return NULL;
 320}
 321
 322/*
 323 * In the case that an entry with the same offset is found, a pointer to
 324 * the existing entry is stored in dupentry and the function returns -EEXIST
 325 */
 326static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 327			struct zswap_entry **dupentry)
 328{
 329	struct rb_node **link = &root->rb_node, *parent = NULL;
 330	struct zswap_entry *myentry;
 331
 332	while (*link) {
 333		parent = *link;
 334		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 335		if (myentry->offset > entry->offset)
 336			link = &(*link)->rb_left;
 337		else if (myentry->offset < entry->offset)
 338			link = &(*link)->rb_right;
 339		else {
 340			*dupentry = myentry;
 341			return -EEXIST;
 342		}
 343	}
 344	rb_link_node(&entry->rbnode, parent, link);
 345	rb_insert_color(&entry->rbnode, root);
 346	return 0;
 347}
 348
 349static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 350{
 351	if (!RB_EMPTY_NODE(&entry->rbnode)) {
 352		rb_erase(&entry->rbnode, root);
 353		RB_CLEAR_NODE(&entry->rbnode);
 354	}
 355}
 356
 357/*
 358 * Carries out the common pattern of freeing an entry's zpool allocation,
 359 * freeing the entry itself, and decrementing the number of stored pages.
 360 */
 361static void zswap_free_entry(struct zswap_entry *entry)
 362{
 363	if (entry->objcg) {
 364		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 365		obj_cgroup_put(entry->objcg);
 366	}
 367	if (!entry->length)
 368		atomic_dec(&zswap_same_filled_pages);
 369	else {
 370		zpool_free(entry->pool->zpool, entry->handle);
 371		zswap_pool_put(entry->pool);
 372	}
 373	zswap_entry_cache_free(entry);
 374	atomic_dec(&zswap_stored_pages);
 375	zswap_update_total_size();
 376}
 377
 378/* caller must hold the tree lock */
 379static void zswap_entry_get(struct zswap_entry *entry)
 380{
 381	entry->refcount++;
 382}
 383
 384/* caller must hold the tree lock
 385 * remove from the tree and free it, if nobody references the entry
 386 */
 387static void zswap_entry_put(struct zswap_tree *tree,
 388			struct zswap_entry *entry)
 389{
 390	int refcount = --entry->refcount;
 391
 392	BUG_ON(refcount < 0);
 393	if (refcount == 0) {
 394		zswap_rb_erase(&tree->rbroot, entry);
 395		zswap_free_entry(entry);
 396	}
 397}
 398
 399/* caller must hold the tree lock */
 400static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 401				pgoff_t offset)
 402{
 403	struct zswap_entry *entry;
 404
 405	entry = zswap_rb_search(root, offset);
 406	if (entry)
 407		zswap_entry_get(entry);
 408
 409	return entry;
 410}
 411
 412/*********************************
 413* per-cpu code
 414**********************************/
 415static DEFINE_PER_CPU(u8 *, zswap_dstmem);
 416/*
 417 * If users dynamically change the zpool type and compressor at runtime,
 418 * i.e. while zswap is running, zswap can have more than one zpool on one
 419 * cpu, all sharing the same dstmem buffer. So this mutex needs to be per-cpu.
 420 */
 421static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
 422
 423static int zswap_dstmem_prepare(unsigned int cpu)
 424{
 425	struct mutex *mutex;
 426	u8 *dst;
 427
 428	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 429	if (!dst)
 430		return -ENOMEM;
 431
 432	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
 433	if (!mutex) {
 434		kfree(dst);
 435		return -ENOMEM;
 436	}
 437
 438	mutex_init(mutex);
 439	per_cpu(zswap_dstmem, cpu) = dst;
 440	per_cpu(zswap_mutex, cpu) = mutex;
 441	return 0;
 442}
 443
 444static int zswap_dstmem_dead(unsigned int cpu)
 445{
 446	struct mutex *mutex;
 447	u8 *dst;
 448
 449	mutex = per_cpu(zswap_mutex, cpu);
 450	kfree(mutex);
 451	per_cpu(zswap_mutex, cpu) = NULL;
 452
 453	dst = per_cpu(zswap_dstmem, cpu);
 454	kfree(dst);
 455	per_cpu(zswap_dstmem, cpu) = NULL;
 456
 457	return 0;
 458}
 459
 460static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 461{
 462	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 463	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 464	struct crypto_acomp *acomp;
 465	struct acomp_req *req;
 466
 467	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
 468	if (IS_ERR(acomp)) {
 469		pr_err("could not alloc crypto acomp %s : %ld\n",
 470				pool->tfm_name, PTR_ERR(acomp));
 471		return PTR_ERR(acomp);
 472	}
 473	acomp_ctx->acomp = acomp;
 474
 475	req = acomp_request_alloc(acomp_ctx->acomp);
 476	if (!req) {
 477		pr_err("could not alloc crypto acomp_request %s\n",
 478		       pool->tfm_name);
 479		crypto_free_acomp(acomp_ctx->acomp);
 480		return -ENOMEM;
 481	}
 482	acomp_ctx->req = req;
 483
 484	crypto_init_wait(&acomp_ctx->wait);
 485	/*
 486	 * If the acomp backend is an async zip driver, crypto_req_done() will
 487	 * wake up crypto_wait_req(); if the backend is scomp, the callback is
 488	 * never invoked and crypto_wait_req() returns without blocking.
 489	 */
 490	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 491				   crypto_req_done, &acomp_ctx->wait);
 492
 493	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
 494	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);
 495
 496	return 0;
 497}
 498
 499static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 500{
 501	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 502	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 503
 504	if (!IS_ERR_OR_NULL(acomp_ctx)) {
 505		if (!IS_ERR_OR_NULL(acomp_ctx->req))
 506			acomp_request_free(acomp_ctx->req);
 507		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
 508			crypto_free_acomp(acomp_ctx->acomp);
 509	}
 510
 511	return 0;
 512}
 513
 514/*********************************
 515* pool functions
 516**********************************/
 517
 518static struct zswap_pool *__zswap_pool_current(void)
 519{
 520	struct zswap_pool *pool;
 521
 522	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 523	WARN_ONCE(!pool && zswap_has_pool,
 524		  "%s: no page storage pool!\n", __func__);
 525
 526	return pool;
 527}
 528
 529static struct zswap_pool *zswap_pool_current(void)
 530{
 531	assert_spin_locked(&zswap_pools_lock);
 532
 533	return __zswap_pool_current();
 534}
 535
 536static struct zswap_pool *zswap_pool_current_get(void)
 537{
 538	struct zswap_pool *pool;
 539
 540	rcu_read_lock();
 541
 542	pool = __zswap_pool_current();
 543	if (!zswap_pool_get(pool))
 544		pool = NULL;
 545
 546	rcu_read_unlock();
 547
 548	return pool;
 549}
 550
 551static struct zswap_pool *zswap_pool_last_get(void)
 552{
 553	struct zswap_pool *pool, *last = NULL;
 554
 555	rcu_read_lock();
 556
 557	list_for_each_entry_rcu(pool, &zswap_pools, list)
 558		last = pool;
 559	WARN_ONCE(!last && zswap_has_pool,
 560		  "%s: no page storage pool!\n", __func__);
 561	if (!zswap_pool_get(last))
 562		last = NULL;
 563
 564	rcu_read_unlock();
 565
 566	return last;
 567}
 568
 569/* type and compressor must be null-terminated */
 570static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 571{
 572	struct zswap_pool *pool;
 573
 574	assert_spin_locked(&zswap_pools_lock);
 575
 576	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 577		if (strcmp(pool->tfm_name, compressor))
 578			continue;
 579		if (strcmp(zpool_get_type(pool->zpool), type))
 580			continue;
 581		/* if we can't get it, it's about to be destroyed */
 582		if (!zswap_pool_get(pool))
 583			continue;
 584		return pool;
 585	}
 586
 587	return NULL;
 588}
 589
 590static void shrink_worker(struct work_struct *w)
 591{
 592	struct zswap_pool *pool = container_of(w, typeof(*pool),
 593						shrink_work);
 594
 595	if (zpool_shrink(pool->zpool, 1, NULL))
 596		zswap_reject_reclaim_fail++;
 597	zswap_pool_put(pool);
 598}
 599
 600static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 601{
 602	struct zswap_pool *pool;
 603	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 604	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 605	int ret;
 606
 607	if (!zswap_has_pool) {
 608		/* if either is unset, pool initialization failed, and we
 609		 * need both params to be set correctly before trying to
 610		 * create a pool.
 611		 */
 612		if (!strcmp(type, ZSWAP_PARAM_UNSET))
 613			return NULL;
 614		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
 615			return NULL;
 616	}
 617
 618	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 619	if (!pool)
 620		return NULL;
 621
 622	/* unique name for each pool, specifically required by zsmalloc */
 623	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
 624
 625	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 626	if (!pool->zpool) {
 627		pr_err("%s zpool not available\n", type);
 628		goto error;
 629	}
 630	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
 631
 632	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 633
 634	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
 635	if (!pool->acomp_ctx) {
 636		pr_err("percpu alloc failed\n");
 637		goto error;
 638	}
 639
 640	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
 641				       &pool->node);
 642	if (ret)
 643		goto error;
 644	pr_debug("using %s compressor\n", pool->tfm_name);
 645
 646	/* being the current pool takes 1 ref; this func expects the
 647	 * caller to always add the new pool as the current pool
 648	 */
 649	kref_init(&pool->kref);
 650	INIT_LIST_HEAD(&pool->list);
 651	INIT_WORK(&pool->shrink_work, shrink_worker);
 652
 653	zswap_pool_debug("created", pool);
 654
 655	return pool;
 656
 657error:
 658	if (pool->acomp_ctx)
 659		free_percpu(pool->acomp_ctx);
 660	if (pool->zpool)
 661		zpool_destroy_pool(pool->zpool);
 662	kfree(pool);
 663	return NULL;
 664}
 665
 666static __init struct zswap_pool *__zswap_pool_create_fallback(void)
 667{
 668	bool has_comp, has_zpool;
 669
 670	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
 671	if (!has_comp && strcmp(zswap_compressor,
 672				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
 673		pr_err("compressor %s not available, using default %s\n",
 674		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
 675		param_free_charp(&zswap_compressor);
 676		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 677		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
 678	}
 679	if (!has_comp) {
 680		pr_err("default compressor %s not available\n",
 681		       zswap_compressor);
 682		param_free_charp(&zswap_compressor);
 683		zswap_compressor = ZSWAP_PARAM_UNSET;
 684	}
 685
 686	has_zpool = zpool_has_pool(zswap_zpool_type);
 687	if (!has_zpool && strcmp(zswap_zpool_type,
 688				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
 689		pr_err("zpool %s not available, using default %s\n",
 690		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
 691		param_free_charp(&zswap_zpool_type);
 692		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 693		has_zpool = zpool_has_pool(zswap_zpool_type);
 694	}
 695	if (!has_zpool) {
 696		pr_err("default zpool %s not available\n",
 697		       zswap_zpool_type);
 698		param_free_charp(&zswap_zpool_type);
 699		zswap_zpool_type = ZSWAP_PARAM_UNSET;
 700	}
 701
 702	if (!has_comp || !has_zpool)
 703		return NULL;
 704
 705	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 706}
 707
 708static void zswap_pool_destroy(struct zswap_pool *pool)
 709{
 710	zswap_pool_debug("destroying", pool);
 711
 712	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 713	free_percpu(pool->acomp_ctx);
 714	zpool_destroy_pool(pool->zpool);
 715	kfree(pool);
 716}
 717
 718static int __must_check zswap_pool_get(struct zswap_pool *pool)
 719{
 720	if (!pool)
 721		return 0;
 722
 723	return kref_get_unless_zero(&pool->kref);
 724}
 725
 726static void __zswap_pool_release(struct work_struct *work)
 727{
 728	struct zswap_pool *pool = container_of(work, typeof(*pool),
 729						release_work);
 730
 731	synchronize_rcu();
 732
 733	/* nobody should have been able to get a kref... */
 734	WARN_ON(kref_get_unless_zero(&pool->kref));
 735
 736	/* pool is now off zswap_pools list and has no references. */
 737	zswap_pool_destroy(pool);
 738}
 739
 740static void __zswap_pool_empty(struct kref *kref)
 741{
 742	struct zswap_pool *pool;
 743
 744	pool = container_of(kref, typeof(*pool), kref);
 745
 746	spin_lock(&zswap_pools_lock);
 747
 748	WARN_ON(pool == zswap_pool_current());
 749
 750	list_del_rcu(&pool->list);
 751
 752	INIT_WORK(&pool->release_work, __zswap_pool_release);
 753	schedule_work(&pool->release_work);
 754
 755	spin_unlock(&zswap_pools_lock);
 756}
 757
 758static void zswap_pool_put(struct zswap_pool *pool)
 759{
 760	kref_put(&pool->kref, __zswap_pool_empty);
 761}
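    /*
     * Reference-flow sketch for the pool functions above: the current
     * pool holds the initial ref from kref_init() in zswap_pool_create();
     * users take additional refs via zswap_pool_get(). When the last ref
     * is dropped, __zswap_pool_empty() unlinks the pool from zswap_pools
     * and __zswap_pool_release() destroys it after an RCU grace period.
     */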
 762
 763/*********************************
 764* param callbacks
 765**********************************/
 766
 767/* val must be a null-terminated string */
 768static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 769			     char *type, char *compressor)
 770{
 771	struct zswap_pool *pool, *put_pool = NULL;
 772	char *s = strstrip((char *)val);
 773	int ret;
 774
 775	if (zswap_init_failed) {
 776		pr_err("can't set param, initialization failed\n");
 777		return -ENODEV;
 778	}
 779
 780	/* no change required */
 781	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
 782		return 0;
 783
 784	/* if this is load-time (pre-init) param setting,
 785	 * don't create a pool; that's done during init.
 786	 */
 787	if (!zswap_init_started)
 788		return param_set_charp(s, kp);
 789
 790	if (!type) {
 791		if (!zpool_has_pool(s)) {
 792			pr_err("zpool %s not available\n", s);
 793			return -ENOENT;
 794		}
 795		type = s;
 796	} else if (!compressor) {
 797		if (!crypto_has_acomp(s, 0, 0)) {
 798			pr_err("compressor %s not available\n", s);
 799			return -ENOENT;
 800		}
 801		compressor = s;
 802	} else {
 803		WARN_ON(1);
 804		return -EINVAL;
 805	}
 806
 807	spin_lock(&zswap_pools_lock);
 808
 809	pool = zswap_pool_find_get(type, compressor);
 810	if (pool) {
 811		zswap_pool_debug("using existing", pool);
 812		WARN_ON(pool == zswap_pool_current());
 813		list_del_rcu(&pool->list);
 814	}
 815
 816	spin_unlock(&zswap_pools_lock);
 817
 818	if (!pool)
 819		pool = zswap_pool_create(type, compressor);
 820
 821	if (pool)
 822		ret = param_set_charp(s, kp);
 823	else
 824		ret = -EINVAL;
 825
 826	spin_lock(&zswap_pools_lock);
 827
 828	if (!ret) {
 829		put_pool = zswap_pool_current();
 830		list_add_rcu(&pool->list, &zswap_pools);
 831		zswap_has_pool = true;
 832	} else if (pool) {
 833		/* add the possibly pre-existing pool to the end of the pools
 834		 * list; if it's new (and empty) then it'll be removed and
 835		 * destroyed by the put after we drop the lock
 836		 */
 837		list_add_tail_rcu(&pool->list, &zswap_pools);
 838		put_pool = pool;
 839	}
 840
 841	spin_unlock(&zswap_pools_lock);
 842
 843	if (!zswap_has_pool && !pool) {
 844		/* if initial pool creation failed, and this pool creation also
 845		 * failed, maybe both compressor and zpool params were bad.
 846		 * Allow changing this param, so pool creation will succeed
 847		 * when the other param is changed. We already verified this
 848		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
 849		 * checks above.
 850		 */
 851		ret = param_set_charp(s, kp);
 852	}
 853
 854	/* drop the ref from either the old current pool,
 855	 * or the new pool we failed to add
 856	 */
 857	if (put_pool)
 858		zswap_pool_put(put_pool);
 859
 860	return ret;
 861}
 862
 863static int zswap_compressor_param_set(const char *val,
 864				      const struct kernel_param *kp)
 865{
 866	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
 867}
 868
 869static int zswap_zpool_param_set(const char *val,
 870				 const struct kernel_param *kp)
 871{
 872	return __zswap_param_set(val, kp, NULL, zswap_compressor);
 873}
 874
 875static int zswap_enabled_param_set(const char *val,
 876				   const struct kernel_param *kp)
 877{
 878	if (zswap_init_failed) {
 879		pr_err("can't enable, initialization failed\n");
 880		return -ENODEV;
 881	}
 882	if (!zswap_has_pool && zswap_init_started) {
 883		pr_err("can't enable, no pool configured\n");
 884		return -ENODEV;
 885	}
 886
 887	return param_set_bool(val, kp);
 888}
 889
 890/*********************************
 891* writeback code
 892**********************************/
 893/* return enum for zswap_get_swap_cache_page */
 894enum zswap_get_swap_ret {
 895	ZSWAP_SWAPCACHE_NEW,
 896	ZSWAP_SWAPCACHE_EXIST,
 897	ZSWAP_SWAPCACHE_FAIL,
 898};
 899
 900/*
 901 * zswap_get_swap_cache_page
 902 *
 903 * This is an adaptation of read_swap_cache_async()
 904 *
 905 * This function tries to find a page with the given swap entry
 906 * in the swapper_space address space (the swap cache).  If the page
 907 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 908 * added to the swap cache, and returned in retpage.
 909 *
 910 * On success, the swap cache page is returned in retpage.
 911 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 912 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 913 *     the new page is added to swapcache and locked
 914 * Returns ZSWAP_SWAPCACHE_FAIL on error
 915 */
 916static int zswap_get_swap_cache_page(swp_entry_t entry,
 917				struct page **retpage)
 918{
 919	bool page_was_allocated;
 920
 921	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
 922			NULL, 0, &page_was_allocated);
 923	if (page_was_allocated)
 924		return ZSWAP_SWAPCACHE_NEW;
 925	if (!*retpage)
 926		return ZSWAP_SWAPCACHE_FAIL;
 927	return ZSWAP_SWAPCACHE_EXIST;
 928}
 929
 930/*
 931 * Attempts to free an entry by adding a page to the swap cache,
 932 * decompressing the entry data into the page, and issuing a
 933 * bio write to write the page back to the swap device.
 934 *
 935 * This can be thought of as a "resumed writeback" of the page
 936 * to the swap device.  We are basically resuming the same swap
 937 * writeback path that was intercepted with the frontswap_store()
 938 * in the first place.  After the page has been decompressed into
 939 * the swap cache, the compressed version stored by zswap can be
 940 * freed.
 941 */
 942static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 943{
 944	struct zswap_header *zhdr;
 945	swp_entry_t swpentry;
 946	struct zswap_tree *tree;
 947	pgoff_t offset;
 948	struct zswap_entry *entry;
 949	struct page *page;
 950	struct scatterlist input, output;
 951	struct crypto_acomp_ctx *acomp_ctx;
 952
 953	u8 *src, *tmp = NULL;
 954	unsigned int dlen;
 955	int ret;
 956	struct writeback_control wbc = {
 957		.sync_mode = WB_SYNC_NONE,
 958	};
 959
 960	if (!zpool_can_sleep_mapped(pool)) {
 961		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
 962		if (!tmp)
 963			return -ENOMEM;
 964	}
 965
 966	/* extract swpentry from data */
 967	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 968	swpentry = zhdr->swpentry; /* here */
 969	tree = zswap_trees[swp_type(swpentry)];
 970	offset = swp_offset(swpentry);
 971	zpool_unmap_handle(pool, handle);
 972
 973	/* find and ref zswap entry */
 974	spin_lock(&tree->lock);
 975	entry = zswap_entry_find_get(&tree->rbroot, offset);
 976	if (!entry) {
 977		/* entry was invalidated */
 978		spin_unlock(&tree->lock);
 979		kfree(tmp);
 980		return 0;
 981	}
 982	spin_unlock(&tree->lock);
 983	BUG_ON(offset != entry->offset);
 984
 985	/* try to allocate swap cache page */
 986	switch (zswap_get_swap_cache_page(swpentry, &page)) {
 987	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
 988		ret = -ENOMEM;
 989		goto fail;
 990
 991	case ZSWAP_SWAPCACHE_EXIST:
 992		/* page is already in the swap cache, ignore for now */
 993		put_page(page);
 994		ret = -EEXIST;
 995		goto fail;
 996
 997	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 998		/* decompress */
 999		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1000		dlen = PAGE_SIZE;
1001
1002		zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
1003		src = (u8 *)zhdr + sizeof(struct zswap_header);
1004		if (!zpool_can_sleep_mapped(pool)) {
1005			memcpy(tmp, src, entry->length);
1006			src = tmp;
1007			zpool_unmap_handle(pool, handle);
1008		}
1009
1010		mutex_lock(acomp_ctx->mutex);
1011		sg_init_one(&input, src, entry->length);
1012		sg_init_table(&output, 1);
1013		sg_set_page(&output, page, PAGE_SIZE, 0);
1014		acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
1015		ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
1016		dlen = acomp_ctx->req->dlen;
1017		mutex_unlock(acomp_ctx->mutex);
1018
1019		if (!zpool_can_sleep_mapped(pool))
1020			kfree(tmp);
1021		else
1022			zpool_unmap_handle(pool, handle);
1023
1024		BUG_ON(ret);
1025		BUG_ON(dlen != PAGE_SIZE);
1026
1027		/* page is up to date */
1028		SetPageUptodate(page);
1029	}
1030
1031	/* move it to the tail of the inactive list after end_writeback */
1032	SetPageReclaim(page);
1033
1034	/* start writeback */
1035	__swap_writepage(page, &wbc);
1036	put_page(page);
1037	zswap_written_back_pages++;
1038
1039	spin_lock(&tree->lock);
1040	/* drop local reference */
1041	zswap_entry_put(tree, entry);
1042
1043	/*
1044	 * There are two possible situations for the entry here:
1045	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
1046	 * (2) refcount is 0: the entry was freed and removed from the tree
1047	 *     because an invalidate happened during writeback.
1048	 * Search the tree; if the entry is still there, drop that last ref.
1049	 */
1050	if (entry == zswap_rb_search(&tree->rbroot, offset))
1051		zswap_entry_put(tree, entry);
1052	spin_unlock(&tree->lock);
1053
1054	return ret;
1055
1056fail:
1057	if (!zpool_can_sleep_mapped(pool))
1058		kfree(tmp);
1059
1060	/*
1061	 * If we get here because of ZSWAP_SWAPCACHE_EXIST,
1062	 * a load may be happening concurrently.
1063	 * It is safe and okay not to free the entry here.
1064	 * If the following put does free the entry,
1065	 * it is also okay to return !0.
1066	 */
1067	spin_lock(&tree->lock);
1068	zswap_entry_put(tree, entry);
1069	spin_unlock(&tree->lock);
1070
1071	return ret;
1072}
1073
1074static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1075{
1076	unsigned int pos;
1077	unsigned long *page;
1078
1079	page = (unsigned long *)ptr;
1080	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
1081		if (page[pos] != page[0])
1082			return 0;
1083	}
1084	*value = page[0];
1085	return 1;
1086}
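    /*
     * Example (illustrative): a page of all zeroes, or one filled with a
     * single repeating word such as 0xffffffffffffffff, passes
     * zswap_is_page_same_filled() and is stored as just entry->value with
     * length == 0, skipping compression and the zpool allocation entirely.
     */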
1087
1088static void zswap_fill_page(void *ptr, unsigned long value)
1089{
1090	unsigned long *page;
1091
1092	page = (unsigned long *)ptr;
1093	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1094}
1095
1096/*********************************
1097* frontswap hooks
1098**********************************/
1099/* attempts to compress and store a single page */
1100static int zswap_frontswap_store(unsigned type, pgoff_t offset,
1101				struct page *page)
1102{
1103	struct zswap_tree *tree = zswap_trees[type];
1104	struct zswap_entry *entry, *dupentry;
1105	struct scatterlist input, output;
1106	struct crypto_acomp_ctx *acomp_ctx;
1107	struct obj_cgroup *objcg = NULL;
1108	struct zswap_pool *pool;
1109	int ret;
1110	unsigned int hlen, dlen = PAGE_SIZE;
1111	unsigned long handle, value;
1112	char *buf;
1113	u8 *src, *dst;
1114	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
1115	gfp_t gfp;
1116
1117	/* THP isn't supported */
1118	if (PageTransHuge(page)) {
1119		ret = -EINVAL;
1120		goto reject;
1121	}
1122
1123	if (!zswap_enabled || !tree) {
1124		ret = -ENODEV;
1125		goto reject;
1126	}
1127
1128	objcg = get_obj_cgroup_from_page(page);
1129	if (objcg && !obj_cgroup_may_zswap(objcg))
1130		goto shrink;
1131
1132	/* reclaim space if needed */
1133	if (zswap_is_full()) {
1134		zswap_pool_limit_hit++;
1135		zswap_pool_reached_full = true;
1136		goto shrink;
1137	}
1138
1139	if (zswap_pool_reached_full) {
1140	       if (!zswap_can_accept()) {
1141			ret = -ENOMEM;
1142			goto reject;
1143		} else
1144			zswap_pool_reached_full = false;
1145	}
1146
1147	/* allocate entry */
1148	entry = zswap_entry_cache_alloc(GFP_KERNEL);
1149	if (!entry) {
1150		zswap_reject_kmemcache_fail++;
1151		ret = -ENOMEM;
1152		goto reject;
1153	}
1154
1155	if (zswap_same_filled_pages_enabled) {
1156		src = kmap_atomic(page);
1157		if (zswap_is_page_same_filled(src, &value)) {
1158			kunmap_atomic(src);
1159			entry->offset = offset;
1160			entry->length = 0;
1161			entry->value = value;
1162			atomic_inc(&zswap_same_filled_pages);
1163			goto insert_entry;
1164		}
1165		kunmap_atomic(src);
1166	}
1167
1168	if (!zswap_non_same_filled_pages_enabled) {
1169		ret = -EINVAL;
1170		goto freepage;
1171	}
1172
1173	/* if entry is successfully added, it keeps the reference */
1174	entry->pool = zswap_pool_current_get();
1175	if (!entry->pool) {
1176		ret = -EINVAL;
1177		goto freepage;
1178	}
1179
1180	/* compress */
1181	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1182
1183	mutex_lock(acomp_ctx->mutex);
1184
1185	dst = acomp_ctx->dstmem;
1186	sg_init_table(&input, 1);
1187	sg_set_page(&input, page, PAGE_SIZE, 0);
1188
1189	/* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect that in the sg_list */
1190	sg_init_one(&output, dst, PAGE_SIZE * 2);
1191	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1192	/*
1193	 * It may look silly to send an asynchronous request and then wait
1194	 * for its completion synchronously; the operation is effectively
1195	 * synchronous.
1196	 * In theory, acomp lets users submit multiple requests to one acomp
1197	 * instance and have them complete simultaneously. But frontswap
1198	 * stores and loads pages one at a time, so a single thread has no
1199	 * way to submit a second page before the first one is done.
1200	 * Threads running on different cpus use different acomp instances,
1201	 * though, so multiple threads can still do (de)compression in
1202	 * parallel.
1203	 */
1204	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1205	dlen = acomp_ctx->req->dlen;
1206
1207	if (ret) {
1208		ret = -EINVAL;
1209		goto put_dstmem;
1210	}
1211
1212	/* store */
1213	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
1214	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1215	if (zpool_malloc_support_movable(entry->pool->zpool))
1216		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1217	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
1218	if (ret == -ENOSPC) {
1219		zswap_reject_compress_poor++;
1220		goto put_dstmem;
1221	}
1222	if (ret) {
1223		zswap_reject_alloc_fail++;
1224		goto put_dstmem;
1225	}
1226	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
1227	memcpy(buf, &zhdr, hlen);
1228	memcpy(buf + hlen, dst, dlen);
1229	zpool_unmap_handle(entry->pool->zpool, handle);
1230	mutex_unlock(acomp_ctx->mutex);
1231
1232	/* populate entry */
1233	entry->offset = offset;
1234	entry->handle = handle;
1235	entry->length = dlen;
1236
1237insert_entry:
1238	entry->objcg = objcg;
1239	if (objcg) {
1240		obj_cgroup_charge_zswap(objcg, entry->length);
1241		/* Account before objcg ref is moved to tree */
1242		count_objcg_event(objcg, ZSWPOUT);
1243	}
1244
1245	/* map */
1246	spin_lock(&tree->lock);
1247	do {
1248		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
1249		if (ret == -EEXIST) {
1250			zswap_duplicate_entry++;
1251			/* remove from rbtree */
1252			zswap_rb_erase(&tree->rbroot, dupentry);
1253			zswap_entry_put(tree, dupentry);
1254		}
1255	} while (ret == -EEXIST);
1256	spin_unlock(&tree->lock);
1257
1258	/* update stats */
1259	atomic_inc(&zswap_stored_pages);
1260	zswap_update_total_size();
1261	count_vm_event(ZSWPOUT);
1262
1263	return 0;
1264
1265put_dstmem:
1266	mutex_unlock(acomp_ctx->mutex);
1267	zswap_pool_put(entry->pool);
1268freepage:
1269	zswap_entry_cache_free(entry);
1270reject:
1271	if (objcg)
1272		obj_cgroup_put(objcg);
1273	return ret;
1274
1275shrink:
1276	pool = zswap_pool_last_get();
1277	if (pool)
1278		queue_work(shrink_wq, &pool->shrink_work);
1279	ret = -ENOMEM;
1280	goto reject;
1281}
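    /*
     * Store-path summary of the function above: reject THP and disabled
     * state, enforce the pool limit, take the same-filled shortcut when
     * possible; otherwise compress into the per-cpu dstmem, copy the
     * result into a fresh zpool allocation, and insert the entry into the
     * per-type red-black tree, replacing any duplicate at that offset.
     */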
1282
1283/*
1284 * returns 0 if the page was successfully decompressed
1285 * returns -1 if the entry was not found or on error
1286 */
1287static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1288				struct page *page)
1289{
1290	struct zswap_tree *tree = zswap_trees[type];
1291	struct zswap_entry *entry;
1292	struct scatterlist input, output;
1293	struct crypto_acomp_ctx *acomp_ctx;
1294	u8 *src, *dst, *tmp;
1295	unsigned int dlen;
1296	int ret;
1297
1298	/* find */
1299	spin_lock(&tree->lock);
1300	entry = zswap_entry_find_get(&tree->rbroot, offset);
1301	if (!entry) {
1302		/* entry was written back */
1303		spin_unlock(&tree->lock);
1304		return -1;
1305	}
1306	spin_unlock(&tree->lock);
1307
1308	if (!entry->length) {
1309		dst = kmap_atomic(page);
1310		zswap_fill_page(dst, entry->value);
1311		kunmap_atomic(dst);
1312		ret = 0;
1313		goto stats;
1314	}
1315
1316	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
1317		tmp = kmalloc(entry->length, GFP_KERNEL);
1318		if (!tmp) {
1319			ret = -ENOMEM;
1320			goto freeentry;
1321		}
1322	}
1323
1324	/* decompress */
1325	dlen = PAGE_SIZE;
1326	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
1327	if (zpool_evictable(entry->pool->zpool))
1328		src += sizeof(struct zswap_header);
1329
1330	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
1331		memcpy(tmp, src, entry->length);
1332		src = tmp;
1333		zpool_unmap_handle(entry->pool->zpool, entry->handle);
1334	}
1335
1336	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1337	mutex_lock(acomp_ctx->mutex);
1338	sg_init_one(&input, src, entry->length);
1339	sg_init_table(&output, 1);
1340	sg_set_page(&output, page, PAGE_SIZE, 0);
1341	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
1342	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
1343	mutex_unlock(acomp_ctx->mutex);
1344
1345	if (zpool_can_sleep_mapped(entry->pool->zpool))
1346		zpool_unmap_handle(entry->pool->zpool, entry->handle);
1347	else
1348		kfree(tmp);
1349
1350	BUG_ON(ret);
1351stats:
1352	count_vm_event(ZSWPIN);
1353	if (entry->objcg)
1354		count_objcg_event(entry->objcg, ZSWPIN);
1355freeentry:
1356	spin_lock(&tree->lock);
1357	zswap_entry_put(tree, entry);
1358	spin_unlock(&tree->lock);
1359
1360	return ret;
1361}
1362
1363/* frees an entry in zswap */
1364static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
1365{
1366	struct zswap_tree *tree = zswap_trees[type];
1367	struct zswap_entry *entry;
1368
1369	/* find */
1370	spin_lock(&tree->lock);
1371	entry = zswap_rb_search(&tree->rbroot, offset);
1372	if (!entry) {
1373		/* entry was written back */
1374		spin_unlock(&tree->lock);
1375		return;
1376	}
1377
1378	/* remove from rbtree */
1379	zswap_rb_erase(&tree->rbroot, entry);
1380
1381	/* drop the initial reference from entry creation */
1382	zswap_entry_put(tree, entry);
1383
1384	spin_unlock(&tree->lock);
1385}
1386
1387/* frees all zswap entries for the given swap type */
1388static void zswap_frontswap_invalidate_area(unsigned type)
1389{
1390	struct zswap_tree *tree = zswap_trees[type];
1391	struct zswap_entry *entry, *n;
1392
1393	if (!tree)
1394		return;
1395
1396	/* walk the tree and free everything */
1397	spin_lock(&tree->lock);
1398	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1399		zswap_free_entry(entry);
1400	tree->rbroot = RB_ROOT;
1401	spin_unlock(&tree->lock);
1402	kfree(tree);
1403	zswap_trees[type] = NULL;
1404}
1405
1406static void zswap_frontswap_init(unsigned type)
1407{
1408	struct zswap_tree *tree;
1409
1410	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1411	if (!tree) {
1412		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1413		return;
1414	}
1415
1416	tree->rbroot = RB_ROOT;
1417	spin_lock_init(&tree->lock);
1418	zswap_trees[type] = tree;
1419}
1420
1421static const struct frontswap_ops zswap_frontswap_ops = {
1422	.store = zswap_frontswap_store,
1423	.load = zswap_frontswap_load,
1424	.invalidate_page = zswap_frontswap_invalidate_page,
1425	.invalidate_area = zswap_frontswap_invalidate_area,
1426	.init = zswap_frontswap_init
1427};
1428
1429/*********************************
1430* debugfs functions
1431**********************************/
1432#ifdef CONFIG_DEBUG_FS
1433#include <linux/debugfs.h>
1434
1435static struct dentry *zswap_debugfs_root;
1436
1437static int __init zswap_debugfs_init(void)
1438{
1439	if (!debugfs_initialized())
1440		return -ENODEV;
1441
1442	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1443
1444	debugfs_create_u64("pool_limit_hit", 0444,
1445			   zswap_debugfs_root, &zswap_pool_limit_hit);
1446	debugfs_create_u64("reject_reclaim_fail", 0444,
1447			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1448	debugfs_create_u64("reject_alloc_fail", 0444,
1449			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1450	debugfs_create_u64("reject_kmemcache_fail", 0444,
1451			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1452	debugfs_create_u64("reject_compress_poor", 0444,
1453			   zswap_debugfs_root, &zswap_reject_compress_poor);
1454	debugfs_create_u64("written_back_pages", 0444,
1455			   zswap_debugfs_root, &zswap_written_back_pages);
1456	debugfs_create_u64("duplicate_entry", 0444,
1457			   zswap_debugfs_root, &zswap_duplicate_entry);
1458	debugfs_create_u64("pool_total_size", 0444,
1459			   zswap_debugfs_root, &zswap_pool_total_size);
1460	debugfs_create_atomic_t("stored_pages", 0444,
1461				zswap_debugfs_root, &zswap_stored_pages);
1462	debugfs_create_atomic_t("same_filled_pages", 0444,
1463				zswap_debugfs_root, &zswap_same_filled_pages);
1464
1465	return 0;
1466}
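    /*
     * With CONFIG_DEBUG_FS enabled, the counters registered above can be
     * inspected at runtime (illustrative; assumes debugfs is mounted at
     * the usual location):
     *
     *	grep . /sys/kernel/debug/zswap/*
     */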
1467#else
1468static int __init zswap_debugfs_init(void)
1469{
1470	return 0;
1471}
1472#endif
1473
1474/*********************************
1475* module init and exit
1476**********************************/
1477static int __init init_zswap(void)
1478{
1479	struct zswap_pool *pool;
1480	int ret;
1481
1482	zswap_init_started = true;
1483
1484	if (zswap_entry_cache_create()) {
1485		pr_err("entry cache creation failed\n");
1486		goto cache_fail;
1487	}
1488
1489	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
1490				zswap_dstmem_prepare, zswap_dstmem_dead);
1491	if (ret) {
1492		pr_err("dstmem alloc failed\n");
1493		goto dstmem_fail;
1494	}
1495
1496	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1497				      "mm/zswap_pool:prepare",
1498				      zswap_cpu_comp_prepare,
1499				      zswap_cpu_comp_dead);
1500	if (ret)
1501		goto hp_fail;
1502
1503	pool = __zswap_pool_create_fallback();
1504	if (pool) {
1505		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1506			zpool_get_type(pool->zpool));
1507		list_add(&pool->list, &zswap_pools);
1508		zswap_has_pool = true;
1509	} else {
1510		pr_err("pool creation failed\n");
1511		zswap_enabled = false;
1512	}
1513
1514	shrink_wq = create_workqueue("zswap-shrink");
1515	if (!shrink_wq)
1516		goto fallback_fail;
1517
1518	ret = frontswap_register_ops(&zswap_frontswap_ops);
1519	if (ret)
1520		goto destroy_wq;
1521	if (zswap_debugfs_init())
1522		pr_warn("debugfs initialization failed\n");
1523	return 0;
1524
1525destroy_wq:
1526	destroy_workqueue(shrink_wq);
1527fallback_fail:
1528	if (pool)
1529		zswap_pool_destroy(pool);
1530hp_fail:
1531	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
1532dstmem_fail:
1533	zswap_entry_cache_destroy();
1534cache_fail:
1535	/* if built-in, we aren't unloaded on failure; don't allow use */
1536	zswap_init_failed = true;
1537	zswap_enabled = false;
1538	return -ENOMEM;
1539}
1540/* must be late so crypto has time to come up */
1541late_initcall(init_zswap);
1542
1543MODULE_LICENSE("GPL");
1544MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1545MODULE_DESCRIPTION("Compressed cache for swap pages");
v4.6
   1/*
   2 * zswap.c - zswap driver file
   3 *
   4 * zswap is a backend for frontswap that takes pages that are in the process
   5 * of being swapped out and attempts to compress and store them in a
   6 * RAM-based memory pool.  This can result in a significant I/O reduction on
   7 * the swap device and, in the case where decompressing from RAM is faster
   8 * than reading from the swap device, can also improve workload performance.
   9 *
  10 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or
  13 * modify it under the terms of the GNU General Public License
  14 * as published by the Free Software Foundation; either version 2
  15 * of the License, or (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21*/
  22
  23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24
  25#include <linux/module.h>
  26#include <linux/cpu.h>
  27#include <linux/highmem.h>
  28#include <linux/slab.h>
  29#include <linux/spinlock.h>
  30#include <linux/types.h>
  31#include <linux/atomic.h>
  32#include <linux/frontswap.h>
  33#include <linux/rbtree.h>
  34#include <linux/swap.h>
  35#include <linux/crypto.h>
  36#include <linux/mempool.h>
  37#include <linux/zpool.h>
  38
  39#include <linux/mm_types.h>
  40#include <linux/page-flags.h>
  41#include <linux/swapops.h>
  42#include <linux/writeback.h>
  43#include <linux/pagemap.h>
  44
  45/*********************************
  46* statistics
  47**********************************/
  48/* Total bytes used by the compressed storage */
  49static u64 zswap_pool_total_size;
  50/* The number of compressed pages currently stored in zswap */
  51static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
  52
  53/*
  54 * The statistics below are not protected from concurrent access for
  55 * performance reasons so they may not be 100% accurate.  However,
  56 * they do provide useful information on roughly how many times a
  57 * certain event is occurring.
  58*/
  59
  60/* Pool limit was hit (see zswap_max_pool_percent) */
  61static u64 zswap_pool_limit_hit;
  62/* Pages written back when pool limit was reached */
  63static u64 zswap_written_back_pages;
  64/* Store failed due to a reclaim failure after pool limit was reached */
  65static u64 zswap_reject_reclaim_fail;
  66/* Compressed page was too big for the allocator to (optimally) store */
  67static u64 zswap_reject_compress_poor;
  68/* Store failed because underlying allocator could not get memory */
  69static u64 zswap_reject_alloc_fail;
  70/* Store failed because the entry metadata could not be allocated (rare) */
  71static u64 zswap_reject_kmemcache_fail;
  72/* Duplicate store was encountered (rare) */
  73static u64 zswap_duplicate_entry;
  74
  75/*********************************
  76* tunables
  77**********************************/
  78
  79/* Enable/disable zswap (disabled by default) */
  80static bool zswap_enabled;
  81module_param_named(enabled, zswap_enabled, bool, 0644);
  82
  83/* Crypto compressor to use */
  84#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
  85static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
  86static int zswap_compressor_param_set(const char *,
  87				      const struct kernel_param *);
  88static struct kernel_param_ops zswap_compressor_param_ops = {
  89	.set =		zswap_compressor_param_set,
  90	.get =		param_get_charp,
  91	.free =		param_free_charp,
  92};
  93module_param_cb(compressor, &zswap_compressor_param_ops,
  94		&zswap_compressor, 0644);
  95
  96/* Compressed storage zpool to use */
  97#define ZSWAP_ZPOOL_DEFAULT "zbud"
  98static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
  99static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 100static struct kernel_param_ops zswap_zpool_param_ops = {
 101	.set =		zswap_zpool_param_set,
 102	.get =		param_get_charp,
 103	.free =		param_free_charp,
 104};
 105module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 106
 107/* The maximum percentage of memory that the compressed pool can occupy */
 108static unsigned int zswap_max_pool_percent = 20;
 109module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 110
 111/*********************************
 112* data structures
 113**********************************/
 114
 115struct zswap_pool {
 116	struct zpool *zpool;
 117	struct crypto_comp * __percpu *tfm;
 118	struct kref kref;
 119	struct list_head list;
 120	struct rcu_head rcu_head;
 121	struct notifier_block notifier;
 122	char tfm_name[CRYPTO_MAX_ALG_NAME];
 123};
 124
 125/*
 126 * struct zswap_entry
 127 *
 128 * This structure contains the metadata for tracking a single compressed
 129 * page within zswap.
 130 *
 131 * rbnode - links the entry into red-black tree for the appropriate swap type
 132 * offset - the swap offset for the entry.  Index into the red-black tree.
 133 * refcount - the number of outstanding references to the entry. This is
 134 *            needed to protect against premature freeing of the entry by
 135 *            concurrent calls to load, invalidate, and writeback.  The lock
 136 *            for the zswap_tree structure that contains the entry must
 137 *            be held while changing the refcount.  Since the lock must
 138 *            be held, there is no reason to also make refcount atomic.
 139 * length - the length in bytes of the compressed page data.  Needed during
 140 *          decompression
 141 * pool - the zswap_pool the entry's data is in
 142 * handle - zpool allocation handle that stores the compressed page data
 143 */
 144struct zswap_entry {
 145	struct rb_node rbnode;
 146	pgoff_t offset;
 147	int refcount;
 148	unsigned int length;
 149	struct zswap_pool *pool;
 150	unsigned long handle;
 151};
 152
 153struct zswap_header {
 154	swp_entry_t swpentry;
 155};
 156
 157/*
 158 * The tree lock in the zswap_tree struct protects a few things:
 159 * - the rbtree
 160 * - the refcount field of each entry in the tree
 161 */
 162struct zswap_tree {
 163	struct rb_root rbroot;
 164	spinlock_t lock;
 165};
 166
 167static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 168
 169/* RCU-protected iteration */
 170static LIST_HEAD(zswap_pools);
 171/* protects zswap_pools list modification */
 172static DEFINE_SPINLOCK(zswap_pools_lock);
 173/* pool counter to provide unique names to zpool */
 174static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 175
 176/* used by param callback function */
 177static bool zswap_init_started;
 178
 179/*********************************
 180* helpers and fwd declarations
 181**********************************/
 182
 183#define zswap_pool_debug(msg, p)				\
 184	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 185		 zpool_get_type((p)->zpool))
 186
 187static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
 188static int zswap_pool_get(struct zswap_pool *pool);
 189static void zswap_pool_put(struct zswap_pool *pool);
 190
 191static const struct zpool_ops zswap_zpool_ops = {
 192	.evict = zswap_writeback_entry
 193};
 194
 195static bool zswap_is_full(void)
 196{
 197	return totalram_pages * zswap_max_pool_percent / 100 <
 198		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 199}
 200
 201static void zswap_update_total_size(void)
 202{
 203	struct zswap_pool *pool;
 204	u64 total = 0;
 205
 206	rcu_read_lock();
 207
 208	list_for_each_entry_rcu(pool, &zswap_pools, list)
 209		total += zpool_get_total_size(pool->zpool);
 210
 211	rcu_read_unlock();
 212
 213	zswap_pool_total_size = total;
 214}
 215
 216/*********************************
 217* zswap entry functions
 218**********************************/
 219static struct kmem_cache *zswap_entry_cache;
 220
 221static int __init zswap_entry_cache_create(void)
 222{
 223	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
 224	return zswap_entry_cache == NULL;
 225}
 226
 227static void __init zswap_entry_cache_destroy(void)
 228{
 229	kmem_cache_destroy(zswap_entry_cache);
 230}
 231
 232static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
 233{
 234	struct zswap_entry *entry;
 235	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
 236	if (!entry)
 237		return NULL;
 238	entry->refcount = 1;
 239	RB_CLEAR_NODE(&entry->rbnode);
 240	return entry;
 241}
 242
 243static void zswap_entry_cache_free(struct zswap_entry *entry)
 244{
 245	kmem_cache_free(zswap_entry_cache, entry);
 246}
 247
 248/*********************************
 249* rbtree functions
 250**********************************/
 251static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 252{
 253	struct rb_node *node = root->rb_node;
 254	struct zswap_entry *entry;
 255
 256	while (node) {
 257		entry = rb_entry(node, struct zswap_entry, rbnode);
 258		if (entry->offset > offset)
 259			node = node->rb_left;
 260		else if (entry->offset < offset)
 261			node = node->rb_right;
 262		else
 263			return entry;
 264	}
 265	return NULL;
 266}
 267
 268/*
 269 * In the case that an entry with the same offset is found, a pointer to
 270 * the existing entry is stored in dupentry and the function returns -EEXIST
 271 */
 272static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 273			struct zswap_entry **dupentry)
 274{
 275	struct rb_node **link = &root->rb_node, *parent = NULL;
 276	struct zswap_entry *myentry;
 277
 278	while (*link) {
 279		parent = *link;
 280		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 281		if (myentry->offset > entry->offset)
 282			link = &(*link)->rb_left;
 283		else if (myentry->offset < entry->offset)
 284			link = &(*link)->rb_right;
 285		else {
 286			*dupentry = myentry;
 287			return -EEXIST;
 288		}
 289	}
 290	rb_link_node(&entry->rbnode, parent, link);
 291	rb_insert_color(&entry->rbnode, root);
 292	return 0;
 293}
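/*
 * The store path retries on -EEXIST, dropping the duplicate first, as
 * zswap_frontswap_store() does below:
 *
 *	do {
 *		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
 *		if (ret == -EEXIST) {
 *			zswap_rb_erase(&tree->rbroot, dupentry);
 *			zswap_entry_put(tree, dupentry);
 *		}
 *	} while (ret == -EEXIST);
 */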
 294
 295static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 296{
 297	if (!RB_EMPTY_NODE(&entry->rbnode)) {
 298		rb_erase(&entry->rbnode, root);
 299		RB_CLEAR_NODE(&entry->rbnode);
 300	}
 301}
 302
 303/*
 304 * Carries out the common pattern of freeing an entry's zpool allocation,
 305 * freeing the entry itself, and decrementing the number of stored pages.
 306 */
 307static void zswap_free_entry(struct zswap_entry *entry)
 308{
 309	zpool_free(entry->pool->zpool, entry->handle);
 310	zswap_pool_put(entry->pool);
 311	zswap_entry_cache_free(entry);
 312	atomic_dec(&zswap_stored_pages);
 313	zswap_update_total_size();
 314}
 315
 316/* caller must hold the tree lock */
 317static void zswap_entry_get(struct zswap_entry *entry)
 318{
 319	entry->refcount++;
 320}
 321
 322/* caller must hold the tree lock
 323 * remove from the tree and free it, if nobody references the entry
 324 */
 325static void zswap_entry_put(struct zswap_tree *tree,
 326			struct zswap_entry *entry)
 327{
 328	int refcount = --entry->refcount;
 329
 330	BUG_ON(refcount < 0);
 331	if (refcount == 0) {
 332		zswap_rb_erase(&tree->rbroot, entry);
 333		zswap_free_entry(entry);
 334	}
 335}
 336
 337/* caller must hold the tree lock */
 338static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 339				pgoff_t offset)
 340{
 341	struct zswap_entry *entry;
 342
 343	entry = zswap_rb_search(root, offset);
 344	if (entry)
 345		zswap_entry_get(entry);
 346
 347	return entry;
 348}
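/*
 * Illustrative sketch, not part of zswap itself: the lookup-then-use
 * pattern callers follow.  The tree lock protects the rbtree and the
 * refcounts; the reference taken under the lock keeps the entry alive
 * once the lock is dropped.
 */
static inline struct zswap_entry *
zswap_example_lookup(struct zswap_tree *tree, pgoff_t offset)
{
	struct zswap_entry *entry;

	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	spin_unlock(&tree->lock);

	/*
	 * The caller may now use entry->handle and entry->length, and
	 * must later retake tree->lock and call zswap_entry_put().
	 */
	return entry;
}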
 349
 350/*********************************
 351* per-cpu code
 352**********************************/
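/*
 * Per-cpu scratch buffer for compression output.  It is two pages
 * long because a poorly compressible page may expand, so the
 * compressor output must be allowed to spill past PAGE_SIZE.
 */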
 353static DEFINE_PER_CPU(u8 *, zswap_dstmem);
 354
 355static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
 356{
 357	u8 *dst;
 358
 359	switch (action) {
 360	case CPU_UP_PREPARE:
 361		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 362		if (!dst) {
 363			pr_err("can't allocate compressor buffer\n");
 364			return NOTIFY_BAD;
 365		}
 366		per_cpu(zswap_dstmem, cpu) = dst;
 367		break;
 368	case CPU_DEAD:
 369	case CPU_UP_CANCELED:
 370		dst = per_cpu(zswap_dstmem, cpu);
 371		kfree(dst);
 372		per_cpu(zswap_dstmem, cpu) = NULL;
 373		break;
 374	default:
 375		break;
 376	}
 377	return NOTIFY_OK;
 378}
 379
 380static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
 381				     unsigned long action, void *pcpu)
 382{
 383	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
 384}
 385
 386static struct notifier_block zswap_dstmem_notifier = {
 387	.notifier_call =	zswap_cpu_dstmem_notifier,
 388};
 389
 390static int __init zswap_cpu_dstmem_init(void)
 391{
 392	unsigned long cpu;
 393
 394	cpu_notifier_register_begin();
 395	for_each_online_cpu(cpu)
 396		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
 397		    NOTIFY_BAD)
 398			goto cleanup;
 399	__register_cpu_notifier(&zswap_dstmem_notifier);
 400	cpu_notifier_register_done();
 401	return 0;
 402
 403cleanup:
 404	for_each_online_cpu(cpu)
 405		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
 406	cpu_notifier_register_done();
 407	return -ENOMEM;
 408}
 409
 410static void zswap_cpu_dstmem_destroy(void)
 411{
 412	unsigned long cpu;
 413
 414	cpu_notifier_register_begin();
 415	for_each_online_cpu(cpu)
 416		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
 417	__unregister_cpu_notifier(&zswap_dstmem_notifier);
 418	cpu_notifier_register_done();
 419}
 420
 421static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
 422				     unsigned long action, unsigned long cpu)
 423{
 424	struct crypto_comp *tfm;
 425
 426	switch (action) {
 427	case CPU_UP_PREPARE:
 428		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
 429			break;
 430		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
 431		if (IS_ERR_OR_NULL(tfm)) {
 432			pr_err("could not alloc crypto comp %s : %ld\n",
 433			       pool->tfm_name, PTR_ERR(tfm));
 434			return NOTIFY_BAD;
 435		}
 436		*per_cpu_ptr(pool->tfm, cpu) = tfm;
 437		break;
 438	case CPU_DEAD:
 439	case CPU_UP_CANCELED:
 440		tfm = *per_cpu_ptr(pool->tfm, cpu);
 441		if (!IS_ERR_OR_NULL(tfm))
 442			crypto_free_comp(tfm);
 443		*per_cpu_ptr(pool->tfm, cpu) = NULL;
 444		break;
 445	default:
 446		break;
 447	}
 448	return NOTIFY_OK;
 449}
 450
 451static int zswap_cpu_comp_notifier(struct notifier_block *nb,
 452				   unsigned long action, void *pcpu)
 453{
 454	unsigned long cpu = (unsigned long)pcpu;
 455	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
 456
 457	return __zswap_cpu_comp_notifier(pool, action, cpu);
 458}
 459
 460static int zswap_cpu_comp_init(struct zswap_pool *pool)
 461{
 462	unsigned long cpu;
 463
 464	memset(&pool->notifier, 0, sizeof(pool->notifier));
 465	pool->notifier.notifier_call = zswap_cpu_comp_notifier;
 466
 467	cpu_notifier_register_begin();
 468	for_each_online_cpu(cpu)
 469		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
 470		    NOTIFY_BAD)
 471			goto cleanup;
 472	__register_cpu_notifier(&pool->notifier);
 473	cpu_notifier_register_done();
 474	return 0;
 475
 476cleanup:
 477	for_each_online_cpu(cpu)
 478		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
 479	cpu_notifier_register_done();
 480	return -ENOMEM;
 481}
 482
 483static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
 484{
 485	unsigned long cpu;
 486
 487	cpu_notifier_register_begin();
 488	for_each_online_cpu(cpu)
 489		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
 490	__unregister_cpu_notifier(&pool->notifier);
 491	cpu_notifier_register_done();
 492}
 493
 494/*********************************
 495* pool functions
 496**********************************/
 497
 498static struct zswap_pool *__zswap_pool_current(void)
 499{
 500	struct zswap_pool *pool;
 501
 502	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 503	WARN_ON(!pool);
 504
 505	return pool;
 506}
 507
 508static struct zswap_pool *zswap_pool_current(void)
 509{
 510	assert_spin_locked(&zswap_pools_lock);
 511
 512	return __zswap_pool_current();
 513}
 514
 515static struct zswap_pool *zswap_pool_current_get(void)
 516{
 517	struct zswap_pool *pool;
 518
 519	rcu_read_lock();
 520
 521	pool = __zswap_pool_current();
 522	if (!pool || !zswap_pool_get(pool))
 523		pool = NULL;
 524
 525	rcu_read_unlock();
 526
 527	return pool;
 528}
 529
 530static struct zswap_pool *zswap_pool_last_get(void)
 531{
 532	struct zswap_pool *pool, *last = NULL;
 533
 534	rcu_read_lock();
 535
 536	list_for_each_entry_rcu(pool, &zswap_pools, list)
 537		last = pool;
 538	if (!WARN_ON(!last) && !zswap_pool_get(last))
 539		last = NULL;
 540
 541	rcu_read_unlock();
 542
 543	return last;
 544}
 545
 546/* type and compressor must be null-terminated */
 547static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 548{
 549	struct zswap_pool *pool;
 550
 551	assert_spin_locked(&zswap_pools_lock);
 552
 553	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 554		if (strcmp(pool->tfm_name, compressor))
 555			continue;
 556		if (strcmp(zpool_get_type(pool->zpool), type))
 557			continue;
 558		/* if we can't get it, it's about to be destroyed */
 559		if (!zswap_pool_get(pool))
 560			continue;
 561		return pool;
 562	}
 563
 564	return NULL;
 565}
 566
 567static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 568{
 569	struct zswap_pool *pool;
 570	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 571	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 572
 573	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 574	if (!pool) {
 575		pr_err("pool alloc failed\n");
 576		return NULL;
 577	}
 578
 579	/* unique name for each pool specifically required by zsmalloc */
 580	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
 581
 582	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 583	if (!pool->zpool) {
 584		pr_err("%s zpool not available\n", type);
 585		goto error;
 586	}
 587	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
 588
 589	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 590	pool->tfm = alloc_percpu(struct crypto_comp *);
 591	if (!pool->tfm) {
 592		pr_err("percpu alloc failed\n");
 593		goto error;
 594	}
 595
 596	if (zswap_cpu_comp_init(pool))
 597		goto error;
 598	pr_debug("using %s compressor\n", pool->tfm_name);
 599
 600	/* being the current pool takes 1 ref; this func expects the
 601	 * caller to always add the new pool as the current pool
 602	 */
 603	kref_init(&pool->kref);
 604	INIT_LIST_HEAD(&pool->list);
 605
 606	zswap_pool_debug("created", pool);
 607
 608	return pool;
 609
 610error:
 611	free_percpu(pool->tfm);
 612	if (pool->zpool)
 613		zpool_destroy_pool(pool->zpool);
 614	kfree(pool);
 615	return NULL;
 616}
 617
 618static __init struct zswap_pool *__zswap_pool_create_fallback(void)
 619{
 620	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
 621		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
 622			pr_err("default compressor %s not available\n",
 623			       zswap_compressor);
 624			return NULL;
 625		}
 626		pr_err("compressor %s not available, using default %s\n",
 627		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
 628		param_free_charp(&zswap_compressor);
 629		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
 630	}
 631	if (!zpool_has_pool(zswap_zpool_type)) {
 632		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
 633			pr_err("default zpool %s not available\n",
 634			       zswap_zpool_type);
 635			return NULL;
 636		}
 637		pr_err("zpool %s not available, using default %s\n",
 638		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
 639		param_free_charp(&zswap_zpool_type);
 640		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
 641	}
 642
 643	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 644}
 645
 646static void zswap_pool_destroy(struct zswap_pool *pool)
 647{
 648	zswap_pool_debug("destroying", pool);
 649
 650	zswap_cpu_comp_destroy(pool);
 651	free_percpu(pool->tfm);
 652	zpool_destroy_pool(pool->zpool);
 653	kfree(pool);
 654}
 655
 656static int __must_check zswap_pool_get(struct zswap_pool *pool)
 657{
 658	return kref_get_unless_zero(&pool->kref);
 659}
 660
 661static void __zswap_pool_release(struct rcu_head *head)
 662{
 663	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);
 664
 665	/* nobody should have been able to get a kref... */
 666	WARN_ON(kref_get_unless_zero(&pool->kref));
 667
 668	/* pool is now off zswap_pools list and has no references. */
 669	zswap_pool_destroy(pool);
 670}
 671
 672static void __zswap_pool_empty(struct kref *kref)
 673{
 674	struct zswap_pool *pool;
 675
 676	pool = container_of(kref, typeof(*pool), kref);
 677
 678	spin_lock(&zswap_pools_lock);
 679
 680	WARN_ON(pool == zswap_pool_current());
 681
 682	list_del_rcu(&pool->list);
 683	call_rcu(&pool->rcu_head, __zswap_pool_release);
 684
 685	spin_unlock(&zswap_pools_lock);
 686}
 687
 688static void zswap_pool_put(struct zswap_pool *pool)
 689{
 690	kref_put(&pool->kref, __zswap_pool_empty);
 691}
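/*
 * Pool lifetime: zswap_pool_create() initializes the kref at 1 on
 * behalf of the current-pool slot.  When the last reference is
 * dropped, __zswap_pool_empty() unlinks the pool under
 * zswap_pools_lock and __zswap_pool_release() destroys it after an
 * RCU grace period, so RCU readers never see a freed pool.
 */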
 692
 693/*********************************
 694* param callbacks
 695**********************************/
 696
 697/* val must be a null-terminated string */
 698static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 699			     char *type, char *compressor)
 700{
 701	struct zswap_pool *pool, *put_pool = NULL;
 702	char *s = strstrip((char *)val);
 703	int ret;
 704
 705	/* no change required */
 706	if (!strcmp(s, *(char **)kp->arg))
 707		return 0;
 708
 709	/* if this is load-time (pre-init) param setting,
 710	 * don't create a pool; that's done during init.
 711	 */
 712	if (!zswap_init_started)
 713		return param_set_charp(s, kp);
 714
 715	if (!type) {
 716		if (!zpool_has_pool(s)) {
 717			pr_err("zpool %s not available\n", s);
 718			return -ENOENT;
 719		}
 720		type = s;
 721	} else if (!compressor) {
 722		if (!crypto_has_comp(s, 0, 0)) {
 723			pr_err("compressor %s not available\n", s);
 724			return -ENOENT;
 725		}
 726		compressor = s;
 727	} else {
 728		WARN_ON(1);
 729		return -EINVAL;
 730	}
 731
 732	spin_lock(&zswap_pools_lock);
 733
 734	pool = zswap_pool_find_get(type, compressor);
 735	if (pool) {
 736		zswap_pool_debug("using existing", pool);
 737		list_del_rcu(&pool->list);
 738	} else {
 739		spin_unlock(&zswap_pools_lock);
 740		pool = zswap_pool_create(type, compressor);
 741		spin_lock(&zswap_pools_lock);
 742	}
 743
 744	if (pool)
 745		ret = param_set_charp(s, kp);
 746	else
 747		ret = -EINVAL;
 748
 749	if (!ret) {
 750		put_pool = zswap_pool_current();
 751		list_add_rcu(&pool->list, &zswap_pools);
 752	} else if (pool) {
 753		/* add the possibly pre-existing pool to the end of the pools
 754		 * list; if it's new (and empty) then it'll be removed and
 755		 * destroyed by the put after we drop the lock
 756		 */
 757		list_add_tail_rcu(&pool->list, &zswap_pools);
 758		put_pool = pool;
 759	}
 760
 761	spin_unlock(&zswap_pools_lock);
 762
 763	/* drop the ref from either the old current pool,
 764	 * or the new pool we failed to add
 765	 */
 766	if (put_pool)
 767		zswap_pool_put(put_pool);
 768
 769	return ret;
 770}
 771
 772static int zswap_compressor_param_set(const char *val,
 773				      const struct kernel_param *kp)
 774{
 775	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
 776}
 777
 778static int zswap_zpool_param_set(const char *val,
 779				 const struct kernel_param *kp)
 780{
 781	return __zswap_param_set(val, kp, NULL, zswap_compressor);
 782}
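/*
 * These callbacks back the zswap module parameters, so the active
 * compressor and zpool can be switched at runtime, e.g. (assuming
 * the lz4 compressor and z3fold zpool are available on this kernel):
 *
 *	# echo lz4 > /sys/module/zswap/parameters/compressor
 *	# echo z3fold > /sys/module/zswap/parameters/zpool
 */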
 783
 784/*********************************
 785* writeback code
 786**********************************/
 787/* return enum for zswap_get_swap_cache_page */
 788enum zswap_get_swap_ret {
 789	ZSWAP_SWAPCACHE_NEW,
 790	ZSWAP_SWAPCACHE_EXIST,
 791	ZSWAP_SWAPCACHE_FAIL,
 792};
 793
 794/*
 795 * zswap_get_swap_cache_page
 796 *
 797 * This is an adaptation of read_swap_cache_async()
 798 *
 799 * This function tries to find a page with the given swap entry
 800 * in the swapper_space address space (the swap cache).  If the page
 801 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 802 * added to the swap cache, and returned in retpage.
 803 *
 804 * On success, the swap cache page is returned in retpage
 805 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 806 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 807 *     the new page is added to swapcache and locked
 808 * Returns ZSWAP_SWAPCACHE_FAIL on error
 809 */
 810static int zswap_get_swap_cache_page(swp_entry_t entry,
 811				struct page **retpage)
 812{
 813	bool page_was_allocated;
 814
 815	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
 816			NULL, 0, &page_was_allocated);
 817	if (page_was_allocated)
 818		return ZSWAP_SWAPCACHE_NEW;
 819	if (!*retpage)
 820		return ZSWAP_SWAPCACHE_FAIL;
 821	return ZSWAP_SWAPCACHE_EXIST;
 822}
 823
 824/*
 825 * Attempts to free an entry by adding a page to the swap cache,
 826 * decompressing the entry data into the page, and issuing a
 827 * bio write to write the page back to the swap device.
 828 *
 829 * This can be thought of as a "resumed writeback" of the page
 830 * to the swap device.  We are basically resuming the same swap
 831 * writeback path that was intercepted with the frontswap_store()
 832 * in the first place.  After the page has been decompressed into
 833 * the swap cache, the compressed version stored by zswap can be
 834 * freed.
 835 */
 836static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 837{
 838	struct zswap_header *zhdr;
 839	swp_entry_t swpentry;
 840	struct zswap_tree *tree;
 841	pgoff_t offset;
 842	struct zswap_entry *entry;
 843	struct page *page;
 844	struct crypto_comp *tfm;
 845	u8 *src, *dst;
 846	unsigned int dlen;
 847	int ret;
 848	struct writeback_control wbc = {
 849		.sync_mode = WB_SYNC_NONE,
 850	};
 851
 852	/* extract swpentry from data */
 853	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 854	swpentry = zhdr->swpentry; /* here */
 855	zpool_unmap_handle(pool, handle);
 856	tree = zswap_trees[swp_type(swpentry)];
 857	offset = swp_offset(swpentry);
 858
 859	/* find and ref zswap entry */
 860	spin_lock(&tree->lock);
 861	entry = zswap_entry_find_get(&tree->rbroot, offset);
 862	if (!entry) {
 863		/* entry was invalidated */
 864		spin_unlock(&tree->lock);
 865		return 0;
 866	}
 867	spin_unlock(&tree->lock);
 868	BUG_ON(offset != entry->offset);
 869
 870	/* try to allocate swap cache page */
 871	switch (zswap_get_swap_cache_page(swpentry, &page)) {
 872	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
 873		ret = -ENOMEM;
 874		goto fail;
 875
 876	case ZSWAP_SWAPCACHE_EXIST:
 877		/* page is already in the swap cache, ignore for now */
 878		put_page(page);
 879		ret = -EEXIST;
 880		goto fail;
 881
 882	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 883		/* decompress */
 884		dlen = PAGE_SIZE;
 885		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
 886				ZPOOL_MM_RO) + sizeof(struct zswap_header);
 887		dst = kmap_atomic(page);
 888		tfm = *get_cpu_ptr(entry->pool->tfm);
 889		ret = crypto_comp_decompress(tfm, src, entry->length,
 890					     dst, &dlen);
 891		put_cpu_ptr(entry->pool->tfm);
 892		kunmap_atomic(dst);
 893		zpool_unmap_handle(entry->pool->zpool, entry->handle);
 894		BUG_ON(ret);
 895		BUG_ON(dlen != PAGE_SIZE);
 896
 897		/* page is up to date */
 898		SetPageUptodate(page);
 899	}
 900
 901	/* move it to the tail of the inactive list after end_writeback */
 902	SetPageReclaim(page);
 903
 904	/* start writeback */
 905	__swap_writepage(page, &wbc, end_swap_bio_write);
 906	put_page(page);
 907	zswap_written_back_pages++;
 908
 909	spin_lock(&tree->lock);
 910	/* drop local reference */
 911	zswap_entry_put(tree, entry);
 912
 913	/*
 914	 * There are two possible situations for entry here:
 915	 * (1) refcount is 1 (normal case): entry is valid and on the tree
 916	 * (2) refcount is 0: entry is freed and not on the tree
 917	 *     because an invalidate happened during writeback;
 918	 *     search the tree and free the entry if it is found
 919	 */
 920	if (entry == zswap_rb_search(&tree->rbroot, offset))
 921		zswap_entry_put(tree, entry);
 922	spin_unlock(&tree->lock);
 923
 924	goto end;
 925
 926	/*
 927	 * if we get here due to ZSWAP_SWAPCACHE_EXIST,
 928	 * a load may be happening concurrently, so it is
 929	 * safe and okay to not free the entry;
 930	 * if the following put does free the entry,
 931	 * it is also okay to return !0
 932	 */
 933fail:
 934	spin_lock(&tree->lock);
 935	zswap_entry_put(tree, entry);
 936	spin_unlock(&tree->lock);
 937
 938end:
 939	return ret;
 940}
 941
 942static int zswap_shrink(void)
 943{
 944	struct zswap_pool *pool;
 945	int ret;
 946
 947	pool = zswap_pool_last_get();
 948	if (!pool)
 949		return -ENOENT;
 950
 951	ret = zpool_shrink(pool->zpool, 1, NULL);
 952
 953	zswap_pool_put(pool);
 954
 955	return ret;
 956}
 957
 958/*********************************
 959* frontswap hooks
 960**********************************/
 961/* attempts to compress and store a single page */
 962static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 963				struct page *page)
 964{
 965	struct zswap_tree *tree = zswap_trees[type];
 966	struct zswap_entry *entry, *dupentry;
 967	struct crypto_comp *tfm;
 968	int ret;
 969	unsigned int dlen = PAGE_SIZE, len;
 970	unsigned long handle;
 971	char *buf;
 972	u8 *src, *dst;
 973	struct zswap_header *zhdr;
 974
 975	if (!zswap_enabled || !tree) {
 976		ret = -ENODEV;
 977		goto reject;
 978	}
 979
 980	/* reclaim space if needed */
 981	if (zswap_is_full()) {
 982		zswap_pool_limit_hit++;
 983		if (zswap_shrink()) {
 984			zswap_reject_reclaim_fail++;
 985			ret = -ENOMEM;
 986			goto reject;
 987		}
 988	}
 989
 990	/* allocate entry */
 991	entry = zswap_entry_cache_alloc(GFP_KERNEL);
 992	if (!entry) {
 993		zswap_reject_kmemcache_fail++;
 994		ret = -ENOMEM;
 995		goto reject;
 996	}
 997
 998	/* if entry is successfully added, it keeps the reference */
 999	entry->pool = zswap_pool_current_get();
1000	if (!entry->pool) {
1001		ret = -EINVAL;
1002		goto freepage;
1003	}
1004
1005	/* compress */
1006	dst = get_cpu_var(zswap_dstmem);
1007	tfm = *get_cpu_ptr(entry->pool->tfm);
1008	src = kmap_atomic(page);
1009	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
1010	kunmap_atomic(src);
1011	put_cpu_ptr(entry->pool->tfm);
1012	if (ret) {
1013		ret = -EINVAL;
1014		goto put_dstmem;
1015	}
1016
1017	/* store */
1018	len = dlen + sizeof(struct zswap_header);
1019	ret = zpool_malloc(entry->pool->zpool, len,
1020			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
1021			   &handle);
1022	if (ret == -ENOSPC) {
1023		zswap_reject_compress_poor++;
1024		goto put_dstmem;
1025	}
1026	if (ret) {
1027		zswap_reject_alloc_fail++;
1028		goto put_dstmem;
1029	}
1030	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
1031	zhdr->swpentry = swp_entry(type, offset);
1032	buf = (u8 *)(zhdr + 1);
1033	memcpy(buf, dst, dlen);
1034	zpool_unmap_handle(entry->pool->zpool, handle);
1035	put_cpu_var(zswap_dstmem);
1036
1037	/* populate entry */
1038	entry->offset = offset;
1039	entry->handle = handle;
1040	entry->length = dlen;
1041
1042	/* map */
1043	spin_lock(&tree->lock);
1044	do {
1045		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
1046		if (ret == -EEXIST) {
1047			zswap_duplicate_entry++;
1048			/* remove from rbtree */
1049			zswap_rb_erase(&tree->rbroot, dupentry);
1050			zswap_entry_put(tree, dupentry);
1051		}
1052	} while (ret == -EEXIST);
1053	spin_unlock(&tree->lock);
1054
1055	/* update stats */
1056	atomic_inc(&zswap_stored_pages);
1057	zswap_update_total_size();
1058
1059	return 0;
1060
1061put_dstmem:
1062	put_cpu_var(zswap_dstmem);
1063	zswap_pool_put(entry->pool);
1064freepage:
1065	zswap_entry_cache_free(entry);
1066reject:
1067	return ret;
1068}
1069
1070/*
1071 * returns 0 if the page was successfully decompressed
1072 * returns -1 if the entry was not found or on error
1073 */
1074static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1075				struct page *page)
1076{
1077	struct zswap_tree *tree = zswap_trees[type];
1078	struct zswap_entry *entry;
1079	struct crypto_comp *tfm;
1080	u8 *src, *dst;
1081	unsigned int dlen;
1082	int ret;
1083
1084	/* find */
1085	spin_lock(&tree->lock);
1086	entry = zswap_entry_find_get(&tree->rbroot, offset);
1087	if (!entry) {
1088		/* entry was written back */
1089		spin_unlock(&tree->lock);
1090		return -1;
1091	}
1092	spin_unlock(&tree->lock);
1093
1094	/* decompress */
1095	dlen = PAGE_SIZE;
1096	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
1097			ZPOOL_MM_RO) + sizeof(struct zswap_header);
1098	dst = kmap_atomic(page);
1099	tfm = *get_cpu_ptr(entry->pool->tfm);
1100	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
1101	put_cpu_ptr(entry->pool->tfm);
1102	kunmap_atomic(dst);
1103	zpool_unmap_handle(entry->pool->zpool, entry->handle);
1104	BUG_ON(ret);
1105
1106	spin_lock(&tree->lock);
1107	zswap_entry_put(tree, entry);
1108	spin_unlock(&tree->lock);
1109
1110	return 0;
1111}
1112
1113/* frees an entry in zswap */
1114static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
1115{
1116	struct zswap_tree *tree = zswap_trees[type];
1117	struct zswap_entry *entry;
1118
1119	/* find */
1120	spin_lock(&tree->lock);
1121	entry = zswap_rb_search(&tree->rbroot, offset);
1122	if (!entry) {
1123		/* entry was written back */
1124		spin_unlock(&tree->lock);
1125		return;
1126	}
1127
1128	/* remove from rbtree */
1129	zswap_rb_erase(&tree->rbroot, entry);
1130
1131	/* drop the initial reference from entry creation */
1132	zswap_entry_put(tree, entry);
1133
1134	spin_unlock(&tree->lock);
1135}
1136
1137/* frees all zswap entries for the given swap type */
1138static void zswap_frontswap_invalidate_area(unsigned type)
1139{
1140	struct zswap_tree *tree = zswap_trees[type];
1141	struct zswap_entry *entry, *n;
1142
1143	if (!tree)
1144		return;
1145
1146	/* walk the tree and free everything */
1147	spin_lock(&tree->lock);
1148	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1149		zswap_free_entry(entry);
1150	tree->rbroot = RB_ROOT;
1151	spin_unlock(&tree->lock);
1152	kfree(tree);
1153	zswap_trees[type] = NULL;
1154}
1155
1156static void zswap_frontswap_init(unsigned type)
1157{
1158	struct zswap_tree *tree;
1159
1160	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
1161	if (!tree) {
1162		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1163		return;
1164	}
1165
1166	tree->rbroot = RB_ROOT;
1167	spin_lock_init(&tree->lock);
1168	zswap_trees[type] = tree;
1169}
1170
1171static struct frontswap_ops zswap_frontswap_ops = {
1172	.store = zswap_frontswap_store,
1173	.load = zswap_frontswap_load,
1174	.invalidate_page = zswap_frontswap_invalidate_page,
1175	.invalidate_area = zswap_frontswap_invalidate_area,
1176	.init = zswap_frontswap_init
1177};
1178
1179/*********************************
1180* debugfs functions
1181**********************************/
1182#ifdef CONFIG_DEBUG_FS
1183#include <linux/debugfs.h>
1184
1185static struct dentry *zswap_debugfs_root;
1186
1187static int __init zswap_debugfs_init(void)
1188{
1189	if (!debugfs_initialized())
1190		return -ENODEV;
1191
1192	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1193	if (!zswap_debugfs_root)
1194		return -ENOMEM;
1195
1196	debugfs_create_u64("pool_limit_hit", S_IRUGO,
1197			zswap_debugfs_root, &zswap_pool_limit_hit);
1198	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
1199			zswap_debugfs_root, &zswap_reject_reclaim_fail);
1200	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
1201			zswap_debugfs_root, &zswap_reject_alloc_fail);
1202	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
1203			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1204	debugfs_create_u64("reject_compress_poor", S_IRUGO,
1205			zswap_debugfs_root, &zswap_reject_compress_poor);
1206	debugfs_create_u64("written_back_pages", S_IRUGO,
1207			zswap_debugfs_root, &zswap_written_back_pages);
1208	debugfs_create_u64("duplicate_entry", S_IRUGO,
1209			zswap_debugfs_root, &zswap_duplicate_entry);
1210	debugfs_create_u64("pool_total_size", S_IRUGO,
1211			zswap_debugfs_root, &zswap_pool_total_size);
1212	debugfs_create_atomic_t("stored_pages", S_IRUGO,
1213			zswap_debugfs_root, &zswap_stored_pages);
1214
1215	return 0;
1216}
1217
1218static void __exit zswap_debugfs_exit(void)
1219{
1220	debugfs_remove_recursive(zswap_debugfs_root);
1221}
1222#else
1223static int __init zswap_debugfs_init(void)
1224{
1225	return 0;
1226}
1227
1228static void __exit zswap_debugfs_exit(void) { }
1229#endif
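/*
 * With CONFIG_DEBUG_FS enabled, the counters above are exposed
 * read-only under /sys/kernel/debug/zswap/, e.g.:
 *
 *	# grep . /sys/kernel/debug/zswap/*
 */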
1230
1231/*********************************
1232* module init and exit
1233**********************************/
1234static int __init init_zswap(void)
1235{
1236	struct zswap_pool *pool;
1237
1238	zswap_init_started = true;
1239
1240	if (zswap_entry_cache_create()) {
1241		pr_err("entry cache creation failed\n");
1242		goto cache_fail;
1243	}
1244
1245	if (zswap_cpu_dstmem_init()) {
1246		pr_err("dstmem alloc failed\n");
1247		goto dstmem_fail;
1248	}
1249
1250	pool = __zswap_pool_create_fallback();
1251	if (!pool) {
1252		pr_err("pool creation failed\n");
1253		goto pool_fail;
1254	}
1255	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1256		zpool_get_type(pool->zpool));
1257
1258	list_add(&pool->list, &zswap_pools);
1259
1260	frontswap_register_ops(&zswap_frontswap_ops);
1261	if (zswap_debugfs_init())
1262		pr_warn("debugfs initialization failed\n");
1263	return 0;
1264
1265pool_fail:
1266	zswap_cpu_dstmem_destroy();
1267dstmem_fail:
1268	zswap_entry_cache_destroy();
1269cache_fail:
1270	return -ENOMEM;
1271}
1272/* must be late so crypto has time to come up */
1273late_initcall(init_zswap);
1274
1275MODULE_LICENSE("GPL");
1276MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1277MODULE_DESCRIPTION("Compressed cache for swap pages");