   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * zswap.c - zswap driver file
   4 *
   5 * zswap is a cache that takes pages that are in the process
   6 * of being swapped out and attempts to compress and store them in a
   7 * RAM-based memory pool.  This can result in a significant I/O reduction on
   8 * the swap device and, in the case where decompressing from RAM is faster
   9 * than reading from the swap device, can also improve workload performance.
  10 *
  11 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  12*/
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/cpu.h>
  18#include <linux/highmem.h>
  19#include <linux/slab.h>
  20#include <linux/spinlock.h>
  21#include <linux/types.h>
  22#include <linux/atomic.h>
  23#include <linux/rbtree.h>
  24#include <linux/swap.h>
  25#include <linux/crypto.h>
  26#include <linux/scatterlist.h>
  27#include <linux/mempolicy.h>
  28#include <linux/mempool.h>
  29#include <linux/zpool.h>
  30#include <crypto/acompress.h>
  31#include <linux/zswap.h>
  32#include <linux/mm_types.h>
  33#include <linux/page-flags.h>
  34#include <linux/swapops.h>
  35#include <linux/writeback.h>
  36#include <linux/pagemap.h>
  37#include <linux/workqueue.h>
  38#include <linux/list_lru.h>
  39
  40#include "swap.h"
  41#include "internal.h"
  42
  43/*********************************
  44* statistics
  45**********************************/
  46/* Total bytes used by the compressed storage */
  47u64 zswap_pool_total_size;
  48/* The number of compressed pages currently stored in zswap */
  49atomic_t zswap_stored_pages = ATOMIC_INIT(0);
  50/* The number of same-value filled pages currently stored in zswap */
  51static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
  52
  53/*
  54 * The statistics below are not protected from concurrent access for
   55 * performance reasons, so they may not be 100% accurate.  However,
  56 * they do provide useful information on roughly how many times a
  57 * certain event is occurring.
  58*/
  59
  60/* Pool limit was hit (see zswap_max_pool_percent) */
  61static u64 zswap_pool_limit_hit;
  62/* Pages written back when pool limit was reached */
  63static u64 zswap_written_back_pages;
  64/* Store failed due to a reclaim failure after pool limit was reached */
  65static u64 zswap_reject_reclaim_fail;
  66/* Store failed due to compression algorithm failure */
  67static u64 zswap_reject_compress_fail;
  68/* Compressed page was too big for the allocator to (optimally) store */
  69static u64 zswap_reject_compress_poor;
  70/* Store failed because underlying allocator could not get memory */
  71static u64 zswap_reject_alloc_fail;
  72/* Store failed because the entry metadata could not be allocated (rare) */
  73static u64 zswap_reject_kmemcache_fail;
  74/* Duplicate store was encountered (rare) */
  75static u64 zswap_duplicate_entry;
  76
  77/* Shrinker work queue */
  78static struct workqueue_struct *shrink_wq;
  79/* Pool limit was hit, we need to calm down */
  80static bool zswap_pool_reached_full;
  81
  82/*********************************
  83* tunables
  84**********************************/
  85
  86#define ZSWAP_PARAM_UNSET ""
  87
  88static int zswap_setup(void);
  89
  90/* Enable/disable zswap */
  91static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
  92static int zswap_enabled_param_set(const char *,
  93				   const struct kernel_param *);
  94static const struct kernel_param_ops zswap_enabled_param_ops = {
  95	.set =		zswap_enabled_param_set,
  96	.get =		param_get_bool,
  97};
  98module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
  99
 100/* Crypto compressor to use */
 101static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 102static int zswap_compressor_param_set(const char *,
 103				      const struct kernel_param *);
 104static const struct kernel_param_ops zswap_compressor_param_ops = {
 105	.set =		zswap_compressor_param_set,
 106	.get =		param_get_charp,
 107	.free =		param_free_charp,
 108};
 109module_param_cb(compressor, &zswap_compressor_param_ops,
 110		&zswap_compressor, 0644);
 111
 112/* Compressed storage zpool to use */
 113static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 114static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 115static const struct kernel_param_ops zswap_zpool_param_ops = {
 116	.set =		zswap_zpool_param_set,
 117	.get =		param_get_charp,
 118	.free =		param_free_charp,
 119};
 120module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 121
 122/* The maximum percentage of memory that the compressed pool can occupy */
 123static unsigned int zswap_max_pool_percent = 20;
 124module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 125
 126/* The threshold for accepting new pages after the max_pool_percent was hit */
 127static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
 128module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
 129		   uint, 0644);
 130
 131/*
 132 * Enable/disable handling same-value filled pages (enabled by default).
  133 * If disabled, every page is considered non-same-value filled.
 134 */
 135static bool zswap_same_filled_pages_enabled = true;
 136module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
 137		   bool, 0644);
 138
 139/* Enable/disable handling non-same-value filled pages (enabled by default) */
 140static bool zswap_non_same_filled_pages_enabled = true;
 141module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
 142		   bool, 0644);
 143
 144static bool zswap_exclusive_loads_enabled = IS_ENABLED(
 145		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
 146module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
 147
 148/* Number of zpools in zswap_pool (empirically determined for scalability) */
 149#define ZSWAP_NR_ZPOOLS 32
 150
 151/* Enable/disable memory pressure-based shrinker. */
 152static bool zswap_shrinker_enabled = IS_ENABLED(
 153		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
 154module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
 155
 156bool is_zswap_enabled(void)
 157{
 158	return zswap_enabled;
 159}
 160
 161/*********************************
 162* data structures
 163**********************************/
 164
 165struct crypto_acomp_ctx {
 166	struct crypto_acomp *acomp;
 167	struct acomp_req *req;
 168	struct crypto_wait wait;
 169	u8 *buffer;
 170	struct mutex mutex;
 171};
 172
 173/*
 174 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 175 * The only case where lru_lock is not acquired while holding tree.lock is
  176 * when a zswap_entry is taken off the lru for writeback, in which case it
  177 * must be verified that it's still valid in the tree.
 178 */
 179struct zswap_pool {
 180	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 181	struct crypto_acomp_ctx __percpu *acomp_ctx;
 182	struct kref kref;
 183	struct list_head list;
 184	struct work_struct release_work;
 185	struct work_struct shrink_work;
 186	struct hlist_node node;
 187	char tfm_name[CRYPTO_MAX_ALG_NAME];
 188	struct list_lru list_lru;
 189	struct mem_cgroup *next_shrink;
 190	struct shrinker *shrinker;
 191	atomic_t nr_stored;
 192};
 193
 194/*
 195 * struct zswap_entry
 196 *
 197 * This structure contains the metadata for tracking a single compressed
 198 * page within zswap.
 199 *
 200 * rbnode - links the entry into red-black tree for the appropriate swap type
 201 * swpentry - associated swap entry, the offset indexes into the red-black tree
  202 * refcount - the number of outstanding references to the entry. This is needed
  203 *            to protect against premature freeing of the entry by
  204 *            concurrent calls to load, invalidate, and writeback.  The lock
 205 *            for the zswap_tree structure that contains the entry must
 206 *            be held while changing the refcount.  Since the lock must
 207 *            be held, there is no reason to also make refcount atomic.
 208 * length - the length in bytes of the compressed page data.  Needed during
  209 *          decompression. For a same-value filled page, length is 0, and both
 210 *          pool and lru are invalid and must be ignored.
 211 * pool - the zswap_pool the entry's data is in
 212 * handle - zpool allocation handle that stores the compressed page data
  213 * value - the common word value of a same-value filled page
 214 * objcg - the obj_cgroup that the compressed memory is charged to
 215 * lru - handle to the pool's lru used to evict pages.
 216 */
 217struct zswap_entry {
 218	struct rb_node rbnode;
 219	swp_entry_t swpentry;
 220	int refcount;
 221	unsigned int length;
 222	struct zswap_pool *pool;
 223	union {
 224		unsigned long handle;
 225		unsigned long value;
 226	};
 227	struct obj_cgroup *objcg;
 228	struct list_head lru;
 229};
 230
 231/*
 232 * The tree lock in the zswap_tree struct protects a few things:
 233 * - the rbtree
 234 * - the refcount field of each entry in the tree
 235 */
 236struct zswap_tree {
 237	struct rb_root rbroot;
 238	spinlock_t lock;
 239};
 240
 241static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 242
 243/* RCU-protected iteration */
 244static LIST_HEAD(zswap_pools);
 245/* protects zswap_pools list modification */
 246static DEFINE_SPINLOCK(zswap_pools_lock);
 247/* pool counter to provide unique names to zpool */
 248static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 249
 250enum zswap_init_type {
 251	ZSWAP_UNINIT,
 252	ZSWAP_INIT_SUCCEED,
 253	ZSWAP_INIT_FAILED
 254};
 255
 256static enum zswap_init_type zswap_init_state;
 257
 258/* used to ensure the integrity of initialization */
 259static DEFINE_MUTEX(zswap_init_lock);
 260
 261/* init completed, but couldn't create the initial pool */
 262static bool zswap_has_pool;
 263
 264/*********************************
 265* helpers and fwd declarations
 266**********************************/
 267
 268#define zswap_pool_debug(msg, p)				\
 269	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 270		 zpool_get_type((p)->zpools[0]))
 271
 272static int zswap_writeback_entry(struct zswap_entry *entry,
 273				 struct zswap_tree *tree);
 274static int zswap_pool_get(struct zswap_pool *pool);
 275static void zswap_pool_put(struct zswap_pool *pool);
 276
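     /*
      * For example, with 16 GiB of RAM and the default tunables
      * (max_pool_percent=20, accept_threshold_percent=90), zswap_is_full()
      * below triggers once the pool exceeds ~3.2 GiB of compressed data, and
      * zswap_can_accept() allows new stores again only after it drops back
      * below ~2.88 GiB.
      */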
 277static bool zswap_is_full(void)
 278{
 279	return totalram_pages() * zswap_max_pool_percent / 100 <
 280			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 281}
 282
 283static bool zswap_can_accept(void)
 284{
 285	return totalram_pages() * zswap_accept_thr_percent / 100 *
 286				zswap_max_pool_percent / 100 >
 287			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 288}
 289
 290static u64 get_zswap_pool_size(struct zswap_pool *pool)
 291{
 292	u64 pool_size = 0;
 293	int i;
 294
 295	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 296		pool_size += zpool_get_total_size(pool->zpools[i]);
 297
 298	return pool_size;
 299}
 300
 301static void zswap_update_total_size(void)
 302{
 303	struct zswap_pool *pool;
 304	u64 total = 0;
 305
 306	rcu_read_lock();
 307
 308	list_for_each_entry_rcu(pool, &zswap_pools, list)
 309		total += get_zswap_pool_size(pool);
 310
 311	rcu_read_unlock();
 312
 313	zswap_pool_total_size = total;
 314}
 315
 316/* should be called under RCU */
 317#ifdef CONFIG_MEMCG
 318static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 319{
 320	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
 321}
 322#else
 323static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 324{
 325	return NULL;
 326}
 327#endif
 328
 329static inline int entry_to_nid(struct zswap_entry *entry)
 330{
 331	return page_to_nid(virt_to_page(entry));
 332}
 333
 334void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 335{
 336	struct zswap_pool *pool;
 337
 338	/* lock out zswap pools list modification */
 339	spin_lock(&zswap_pools_lock);
 340	list_for_each_entry(pool, &zswap_pools, list) {
 341		if (pool->next_shrink == memcg)
 342			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
 343	}
 344	spin_unlock(&zswap_pools_lock);
 345}
 346
 347/*********************************
 348* zswap entry functions
 349**********************************/
 350static struct kmem_cache *zswap_entry_cache;
 351
 352static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
 353{
 354	struct zswap_entry *entry;
 355	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
 356	if (!entry)
 357		return NULL;
 358	entry->refcount = 1;
 359	RB_CLEAR_NODE(&entry->rbnode);
 360	return entry;
 361}
 362
 363static void zswap_entry_cache_free(struct zswap_entry *entry)
 364{
 365	kmem_cache_free(zswap_entry_cache, entry);
 366}
 367
 368/*********************************
 369* zswap lruvec functions
 370**********************************/
 371void zswap_lruvec_state_init(struct lruvec *lruvec)
 372{
 373	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
 374}
 375
 376void zswap_folio_swapin(struct folio *folio)
 377{
 378	struct lruvec *lruvec;
 379
 380	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 381	lruvec = folio_lruvec(folio);
 382	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 383}
 384
 385/*********************************
 386* lru functions
 387**********************************/
 388static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 389{
 390	atomic_long_t *nr_zswap_protected;
 391	unsigned long lru_size, old, new;
 392	int nid = entry_to_nid(entry);
 393	struct mem_cgroup *memcg;
 394	struct lruvec *lruvec;
 395
 396	/*
 397	 * Note that it is safe to use rcu_read_lock() here, even in the face of
 398	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
 399	 * used in list_lru lookup, only two scenarios are possible:
 400	 *
 401	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
 402	 *    new entry will be reparented to memcg's parent's list_lru.
 403	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
 404	 *    new entry will be added directly to memcg's parent's list_lru.
 405	 *
 406	 * Similar reasoning holds for list_lru_del() and list_lru_putback().
 407	 */
 408	rcu_read_lock();
 409	memcg = mem_cgroup_from_entry(entry);
 410	/* will always succeed */
 411	list_lru_add(list_lru, &entry->lru, nid, memcg);
 412
 413	/* Update the protection area */
 414	lru_size = list_lru_count_one(list_lru, nid, memcg);
 415	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 416	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
 417	old = atomic_long_inc_return(nr_zswap_protected);
 418	/*
 419	 * Decay to avoid overflow and adapt to changing workloads.
 420	 * This is based on LRU reclaim cost decaying heuristics.
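     	 * Concretely: once the protected count exceeds a quarter of the LRU
     	 * size, it is halved; otherwise it is left unchanged.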
 421	 */
 422	do {
 423		new = old > lru_size / 4 ? old / 2 : old;
 424	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
 425	rcu_read_unlock();
 426}
 427
 428static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
 429{
 430	int nid = entry_to_nid(entry);
 431	struct mem_cgroup *memcg;
 432
 433	rcu_read_lock();
 434	memcg = mem_cgroup_from_entry(entry);
 435	/* will always succeed */
 436	list_lru_del(list_lru, &entry->lru, nid, memcg);
 437	rcu_read_unlock();
 438}
 439
 440static void zswap_lru_putback(struct list_lru *list_lru,
 441		struct zswap_entry *entry)
 442{
 443	int nid = entry_to_nid(entry);
 444	spinlock_t *lock = &list_lru->node[nid].lock;
 445	struct mem_cgroup *memcg;
 446	struct lruvec *lruvec;
 447
 448	rcu_read_lock();
 449	memcg = mem_cgroup_from_entry(entry);
 450	spin_lock(lock);
 451	/* we cannot use list_lru_add here, because it increments node's lru count */
 452	list_lru_putback(list_lru, &entry->lru, nid, memcg);
 453	spin_unlock(lock);
 454
 455	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
 456	/* increment the protection area to account for the LRU rotation. */
 457	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 458	rcu_read_unlock();
 459}
 460
 461/*********************************
 462* rbtree functions
 463**********************************/
 464static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 465{
 466	struct rb_node *node = root->rb_node;
 467	struct zswap_entry *entry;
 468	pgoff_t entry_offset;
 469
 470	while (node) {
 471		entry = rb_entry(node, struct zswap_entry, rbnode);
 472		entry_offset = swp_offset(entry->swpentry);
 473		if (entry_offset > offset)
 474			node = node->rb_left;
 475		else if (entry_offset < offset)
 476			node = node->rb_right;
 477		else
 478			return entry;
 479	}
 480	return NULL;
 481}
 482
 483/*
  484 * In the case that an entry with the same offset is found, a pointer to
 485 * the existing entry is stored in dupentry and the function returns -EEXIST
 486 */
 487static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 488			struct zswap_entry **dupentry)
 489{
 490	struct rb_node **link = &root->rb_node, *parent = NULL;
 491	struct zswap_entry *myentry;
 492	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
 493
 494	while (*link) {
 495		parent = *link;
 496		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 497		myentry_offset = swp_offset(myentry->swpentry);
 498		if (myentry_offset > entry_offset)
 499			link = &(*link)->rb_left;
 500		else if (myentry_offset < entry_offset)
 501			link = &(*link)->rb_right;
 502		else {
 503			*dupentry = myentry;
 504			return -EEXIST;
 505		}
 506	}
 507	rb_link_node(&entry->rbnode, parent, link);
 508	rb_insert_color(&entry->rbnode, root);
 509	return 0;
 510}
 511
 512static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 513{
 514	if (!RB_EMPTY_NODE(&entry->rbnode)) {
 515		rb_erase(&entry->rbnode, root);
 516		RB_CLEAR_NODE(&entry->rbnode);
 517		return true;
 518	}
 519	return false;
 520}
 521
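     /*
      * Entries are spread across the pool's ZSWAP_NR_ZPOOLS zpools by hashing
      * the entry pointer, reducing contention on any single zpool.
      */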
 522static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
 523{
 524	int i = 0;
 525
 526	if (ZSWAP_NR_ZPOOLS > 1)
 527		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
 528
 529	return entry->pool->zpools[i];
 530}
 531
 532/*
  533 * Carries out the common pattern of freeing an entry's zpool allocation,
 534 * freeing the entry itself, and decrementing the number of stored pages.
 535 */
 536static void zswap_free_entry(struct zswap_entry *entry)
 537{
 538	if (!entry->length)
 539		atomic_dec(&zswap_same_filled_pages);
 540	else {
 541		zswap_lru_del(&entry->pool->list_lru, entry);
 542		zpool_free(zswap_find_zpool(entry), entry->handle);
 543		atomic_dec(&entry->pool->nr_stored);
 544		zswap_pool_put(entry->pool);
 545	}
 546	if (entry->objcg) {
 547		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 548		obj_cgroup_put(entry->objcg);
 549	}
 550	zswap_entry_cache_free(entry);
 551	atomic_dec(&zswap_stored_pages);
 552	zswap_update_total_size();
 553}
 554
 555/* caller must hold the tree lock */
 556static void zswap_entry_get(struct zswap_entry *entry)
 557{
 558	entry->refcount++;
 559}
 560
  561/* caller must hold the tree lock
  562 * remove from the tree and free it, if nobody references the entry
  563 */
 564static void zswap_entry_put(struct zswap_tree *tree,
 565			struct zswap_entry *entry)
 566{
 567	int refcount = --entry->refcount;
 568
 569	WARN_ON_ONCE(refcount < 0);
 570	if (refcount == 0) {
 571		WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
 572		zswap_free_entry(entry);
 573	}
 574}
 575
 576/* caller must hold the tree lock */
 577static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 578				pgoff_t offset)
 579{
 580	struct zswap_entry *entry;
 581
 582	entry = zswap_rb_search(root, offset);
 583	if (entry)
 584		zswap_entry_get(entry);
 585
 586	return entry;
 587}
 588
 589/*********************************
 590* shrinker functions
 591**********************************/
 592static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
 593				       spinlock_t *lock, void *arg);
 594
 595static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 596		struct shrink_control *sc)
 597{
 598	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
 599	unsigned long shrink_ret, nr_protected, lru_size;
 600	struct zswap_pool *pool = shrinker->private_data;
 601	bool encountered_page_in_swapcache = false;
 602
 603	if (!zswap_shrinker_enabled ||
 604			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
 605		sc->nr_scanned = 0;
 606		return SHRINK_STOP;
 607	}
 608
 609	nr_protected =
 610		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 611	lru_size = list_lru_shrink_count(&pool->list_lru, sc);
 612
 613	/*
 614	 * Abort if we are shrinking into the protected region.
 615	 *
  616	 * This short-circuiting is necessary because if there are too many
  617	 * concurrent reclaimers getting the freeable zswap object counts at the
  618	 * same time (before any of them has made reasonable progress), the total
  619	 * number of reclaimed objects might be more than the number of unprotected
  620	 * objects (i.e. the reclaimers will reclaim into the protected area of the
 621	 * zswap LRU).
 622	 */
 623	if (nr_protected >= lru_size - sc->nr_to_scan) {
 624		sc->nr_scanned = 0;
 625		return SHRINK_STOP;
 626	}
 627
 628	shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
 629		&encountered_page_in_swapcache);
 630
 631	if (encountered_page_in_swapcache)
 632		return SHRINK_STOP;
 633
 634	return shrink_ret ? shrink_ret : SHRINK_STOP;
 635}
 636
 637static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 638		struct shrink_control *sc)
 639{
 640	struct zswap_pool *pool = shrinker->private_data;
 641	struct mem_cgroup *memcg = sc->memcg;
 642	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
 643	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
 644
 645	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
 646		return 0;
 647
 648#ifdef CONFIG_MEMCG_KMEM
 649	mem_cgroup_flush_stats(memcg);
 650	nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
 651	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
 652#else
 653	/* use pool stats instead of memcg stats */
 654	nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
 655	nr_stored = atomic_read(&pool->nr_stored);
 656#endif
 657
 658	if (!nr_stored)
 659		return 0;
 660
 661	nr_protected =
 662		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 663	nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
 664	/*
  665	 * Subtract from the lru size an estimate of the number of pages
 666	 * that should be protected.
 667	 */
 668	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
 669
 670	/*
 671	 * Scale the number of freeable pages by the memory saving factor.
 672	 * This ensures that the better zswap compresses memory, the fewer
 673	 * pages we will evict to swap (as it will otherwise incur IO for
 674	 * relatively small memory saving).
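     	 * For example, at roughly a 3:1 compression ratio only about a third
     	 * of the freeable objects are reported to the shrinker core.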
 675	 */
 676	return mult_frac(nr_freeable, nr_backing, nr_stored);
 677}
 678
 679static void zswap_alloc_shrinker(struct zswap_pool *pool)
 680{
 681	pool->shrinker =
 682		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
 683	if (!pool->shrinker)
 684		return;
 685
 686	pool->shrinker->private_data = pool;
 687	pool->shrinker->scan_objects = zswap_shrinker_scan;
 688	pool->shrinker->count_objects = zswap_shrinker_count;
 689	pool->shrinker->batch = 0;
 690	pool->shrinker->seeks = DEFAULT_SEEKS;
 691}
 692
 693/*********************************
 694* per-cpu code
 695**********************************/
 696static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 697{
 698	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 699	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 700	struct crypto_acomp *acomp;
 701	struct acomp_req *req;
 702	int ret;
 703
 704	mutex_init(&acomp_ctx->mutex);
 705
 706	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 707	if (!acomp_ctx->buffer)
 708		return -ENOMEM;
 709
 710	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
 711	if (IS_ERR(acomp)) {
 712		pr_err("could not alloc crypto acomp %s : %ld\n",
 713				pool->tfm_name, PTR_ERR(acomp));
 714		ret = PTR_ERR(acomp);
 715		goto acomp_fail;
 716	}
 717	acomp_ctx->acomp = acomp;
 718
 719	req = acomp_request_alloc(acomp_ctx->acomp);
 720	if (!req) {
 721		pr_err("could not alloc crypto acomp_request %s\n",
 722		       pool->tfm_name);
 723		ret = -ENOMEM;
 724		goto req_fail;
 725	}
 726	acomp_ctx->req = req;
 727
 728	crypto_init_wait(&acomp_ctx->wait);
 729	/*
  730	 * If the acomp backend is an async zip driver, crypto_req_done() will wake
  731	 * up crypto_wait_req(); if the backend is scomp, the callback won't be
  732	 * called and crypto_wait_req() will return without blocking.
 733	 */
 734	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 735				   crypto_req_done, &acomp_ctx->wait);
 736
 737	return 0;
 738
 739req_fail:
 740	crypto_free_acomp(acomp_ctx->acomp);
 741acomp_fail:
 742	kfree(acomp_ctx->buffer);
 743	return ret;
 744}
 745
 746static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 747{
 748	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 749	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 750
 751	if (!IS_ERR_OR_NULL(acomp_ctx)) {
 752		if (!IS_ERR_OR_NULL(acomp_ctx->req))
 753			acomp_request_free(acomp_ctx->req);
 754		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
 755			crypto_free_acomp(acomp_ctx->acomp);
 756		kfree(acomp_ctx->buffer);
 757	}
 758
 759	return 0;
 760}
 761
 762/*********************************
 763* pool functions
 764**********************************/
 765
 766static struct zswap_pool *__zswap_pool_current(void)
 767{
 768	struct zswap_pool *pool;
 769
 770	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 771	WARN_ONCE(!pool && zswap_has_pool,
 772		  "%s: no page storage pool!\n", __func__);
 773
 774	return pool;
 775}
 776
 777static struct zswap_pool *zswap_pool_current(void)
 778{
 779	assert_spin_locked(&zswap_pools_lock);
 780
 781	return __zswap_pool_current();
 782}
 783
 784static struct zswap_pool *zswap_pool_current_get(void)
 785{
 786	struct zswap_pool *pool;
 787
 788	rcu_read_lock();
 789
 790	pool = __zswap_pool_current();
 791	if (!zswap_pool_get(pool))
 792		pool = NULL;
 793
 794	rcu_read_unlock();
 795
 796	return pool;
 797}
 798
 799static struct zswap_pool *zswap_pool_last_get(void)
 800{
 801	struct zswap_pool *pool, *last = NULL;
 802
 803	rcu_read_lock();
 804
 805	list_for_each_entry_rcu(pool, &zswap_pools, list)
 806		last = pool;
 807	WARN_ONCE(!last && zswap_has_pool,
 808		  "%s: no page storage pool!\n", __func__);
 809	if (!zswap_pool_get(last))
 810		last = NULL;
 811
 812	rcu_read_unlock();
 813
 814	return last;
 815}
 816
 817/* type and compressor must be null-terminated */
 818static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 819{
 820	struct zswap_pool *pool;
 821
 822	assert_spin_locked(&zswap_pools_lock);
 823
 824	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 825		if (strcmp(pool->tfm_name, compressor))
 826			continue;
 827		/* all zpools share the same type */
 828		if (strcmp(zpool_get_type(pool->zpools[0]), type))
 829			continue;
 830		/* if we can't get it, it's about to be destroyed */
 831		if (!zswap_pool_get(pool))
 832			continue;
 833		return pool;
 834	}
 835
 836	return NULL;
 837}
 838
 839/*
 840 * If the entry is still valid in the tree, drop the initial ref and remove it
 841 * from the tree. This function must be called with an additional ref held,
 842 * otherwise it may race with another invalidation freeing the entry.
 843 */
 844static void zswap_invalidate_entry(struct zswap_tree *tree,
 845				   struct zswap_entry *entry)
 846{
 847	if (zswap_rb_erase(&tree->rbroot, entry))
 848		zswap_entry_put(tree, entry);
 849}
 850
 851static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
 852				       spinlock_t *lock, void *arg)
 853{
 854	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
 855	bool *encountered_page_in_swapcache = (bool *)arg;
 856	struct zswap_tree *tree;
 857	pgoff_t swpoffset;
 858	enum lru_status ret = LRU_REMOVED_RETRY;
 859	int writeback_result;
 860
 861	/*
 862	 * Once the lru lock is dropped, the entry might get freed. The
 863	 * swpoffset is copied to the stack, and entry isn't deref'd again
 864	 * until the entry is verified to still be alive in the tree.
 865	 */
 866	swpoffset = swp_offset(entry->swpentry);
 867	tree = zswap_trees[swp_type(entry->swpentry)];
 868	list_lru_isolate(l, item);
 869	/*
 870	 * It's safe to drop the lock here because we return either
 871	 * LRU_REMOVED_RETRY or LRU_RETRY.
 872	 */
 873	spin_unlock(lock);
 874
 875	/* Check for invalidate() race */
 876	spin_lock(&tree->lock);
 877	if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
 878		goto unlock;
 879
 880	/* Hold a reference to prevent a free during writeback */
 881	zswap_entry_get(entry);
 882	spin_unlock(&tree->lock);
 883
 884	writeback_result = zswap_writeback_entry(entry, tree);
 885
 886	spin_lock(&tree->lock);
 887	if (writeback_result) {
 888		zswap_reject_reclaim_fail++;
 889		zswap_lru_putback(&entry->pool->list_lru, entry);
 890		ret = LRU_RETRY;
 891
 892		/*
 893		 * Encountering a page already in swap cache is a sign that we are shrinking
 894		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
 895		 * shrinker context).
 896		 */
 897		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
 898			*encountered_page_in_swapcache = true;
 899
 900		goto put_unlock;
 901	}
 902	zswap_written_back_pages++;
 903
 904	if (entry->objcg)
 905		count_objcg_event(entry->objcg, ZSWPWB);
 906
 907	count_vm_event(ZSWPWB);
 908	/*
 909	 * Writeback started successfully, the page now belongs to the
 910	 * swapcache. Drop the entry from zswap - unless invalidate already
 911	 * took it out while we had the tree->lock released for IO.
 912	 */
 913	zswap_invalidate_entry(tree, entry);
 914
 915put_unlock:
 916	/* Drop local reference */
 917	zswap_entry_put(tree, entry);
 918unlock:
 919	spin_unlock(&tree->lock);
 920	spin_lock(lock);
 921	return ret;
 922}
 923
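     /*
      * Writeback is attempted for one LRU entry per NUMA node on behalf of
      * the given memcg.
      */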
 924static int shrink_memcg(struct mem_cgroup *memcg)
 925{
 926	struct zswap_pool *pool;
 927	int nid, shrunk = 0;
 928
 929	if (!mem_cgroup_zswap_writeback_enabled(memcg))
 930		return -EINVAL;
 931
 932	/*
 933	 * Skip zombies because their LRUs are reparented and we would be
 934	 * reclaiming from the parent instead of the dead memcg.
 935	 */
 936	if (memcg && !mem_cgroup_online(memcg))
 937		return -ENOENT;
 938
 939	pool = zswap_pool_current_get();
 940	if (!pool)
 941		return -EINVAL;
 942
 943	for_each_node_state(nid, N_NORMAL_MEMORY) {
 944		unsigned long nr_to_walk = 1;
 945
 946		shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
 947					    &shrink_memcg_cb, NULL, &nr_to_walk);
 948	}
 949	zswap_pool_put(pool);
 950	return shrunk ? 0 : -EAGAIN;
 951}
 952
 953static void shrink_worker(struct work_struct *w)
 954{
 955	struct zswap_pool *pool = container_of(w, typeof(*pool),
 956						shrink_work);
 957	struct mem_cgroup *memcg;
 958	int ret, failures = 0;
 959
 960	/* global reclaim will select cgroup in a round-robin fashion. */
 961	do {
 962		spin_lock(&zswap_pools_lock);
 963		pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
 964		memcg = pool->next_shrink;
 965
 966		/*
 967		 * We need to retry if we have gone through a full round trip, or if we
 968		 * got an offline memcg (or else we risk undoing the effect of the
 969		 * zswap memcg offlining cleanup callback). This is not catastrophic
 970		 * per se, but it will keep the now offlined memcg hostage for a while.
 971		 *
 972		 * Note that if we got an online memcg, we will keep the extra
 973		 * reference in case the original reference obtained by mem_cgroup_iter
 974		 * is dropped by the zswap memcg offlining callback, ensuring that the
 975		 * memcg is not killed when we are reclaiming.
 976		 */
 977		if (!memcg) {
 978			spin_unlock(&zswap_pools_lock);
 979			if (++failures == MAX_RECLAIM_RETRIES)
 980				break;
 981
 982			goto resched;
 983		}
 984
 985		if (!mem_cgroup_tryget_online(memcg)) {
 986			/* drop the reference from mem_cgroup_iter() */
 987			mem_cgroup_iter_break(NULL, memcg);
 988			pool->next_shrink = NULL;
 989			spin_unlock(&zswap_pools_lock);
 990
 991			if (++failures == MAX_RECLAIM_RETRIES)
 992				break;
 993
 994			goto resched;
 995		}
 996		spin_unlock(&zswap_pools_lock);
 997
 998		ret = shrink_memcg(memcg);
 999		/* drop the extra reference */
1000		mem_cgroup_put(memcg);
1001
1002		if (ret == -EINVAL)
1003			break;
1004		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1005			break;
1006
1007resched:
1008		cond_resched();
1009	} while (!zswap_can_accept());
1010	zswap_pool_put(pool);
1011}
1012
1013static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
1014{
1015	int i;
1016	struct zswap_pool *pool;
1017	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
1018	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1019	int ret;
1020
1021	if (!zswap_has_pool) {
 1022		/* if either is unset, pool initialization failed, and we
1023		 * need both params to be set correctly before trying to
1024		 * create a pool.
1025		 */
1026		if (!strcmp(type, ZSWAP_PARAM_UNSET))
1027			return NULL;
1028		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
1029			return NULL;
1030	}
1031
1032	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1033	if (!pool)
1034		return NULL;
1035
1036	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
1037		/* unique name for each pool specifically required by zsmalloc */
1038		snprintf(name, 38, "zswap%x",
1039			 atomic_inc_return(&zswap_pools_count));
1040
1041		pool->zpools[i] = zpool_create_pool(type, name, gfp);
1042		if (!pool->zpools[i]) {
1043			pr_err("%s zpool not available\n", type);
1044			goto error;
1045		}
1046	}
1047	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
1048
1049	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
1050
1051	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
1052	if (!pool->acomp_ctx) {
1053		pr_err("percpu alloc failed\n");
1054		goto error;
1055	}
1056
1057	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
1058				       &pool->node);
1059	if (ret)
1060		goto error;
1061
1062	zswap_alloc_shrinker(pool);
1063	if (!pool->shrinker)
1064		goto error;
1065
1066	pr_debug("using %s compressor\n", pool->tfm_name);
1067
1068	/* being the current pool takes 1 ref; this func expects the
1069	 * caller to always add the new pool as the current pool
1070	 */
1071	kref_init(&pool->kref);
1072	INIT_LIST_HEAD(&pool->list);
1073	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
1074		goto lru_fail;
1075	shrinker_register(pool->shrinker);
1076	INIT_WORK(&pool->shrink_work, shrink_worker);
1077	atomic_set(&pool->nr_stored, 0);
1078
1079	zswap_pool_debug("created", pool);
1080
1081	return pool;
1082
1083lru_fail:
1084	list_lru_destroy(&pool->list_lru);
1085	shrinker_free(pool->shrinker);
1086error:
1087	if (pool->acomp_ctx)
1088		free_percpu(pool->acomp_ctx);
1089	while (i--)
1090		zpool_destroy_pool(pool->zpools[i]);
1091	kfree(pool);
1092	return NULL;
1093}
1094
1095static struct zswap_pool *__zswap_pool_create_fallback(void)
1096{
1097	bool has_comp, has_zpool;
1098
1099	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
1100	if (!has_comp && strcmp(zswap_compressor,
1101				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
1102		pr_err("compressor %s not available, using default %s\n",
1103		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
1104		param_free_charp(&zswap_compressor);
1105		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
1106		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
1107	}
1108	if (!has_comp) {
1109		pr_err("default compressor %s not available\n",
1110		       zswap_compressor);
1111		param_free_charp(&zswap_compressor);
1112		zswap_compressor = ZSWAP_PARAM_UNSET;
1113	}
1114
1115	has_zpool = zpool_has_pool(zswap_zpool_type);
1116	if (!has_zpool && strcmp(zswap_zpool_type,
1117				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
1118		pr_err("zpool %s not available, using default %s\n",
1119		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
1120		param_free_charp(&zswap_zpool_type);
1121		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
1122		has_zpool = zpool_has_pool(zswap_zpool_type);
1123	}
1124	if (!has_zpool) {
1125		pr_err("default zpool %s not available\n",
1126		       zswap_zpool_type);
1127		param_free_charp(&zswap_zpool_type);
1128		zswap_zpool_type = ZSWAP_PARAM_UNSET;
1129	}
1130
1131	if (!has_comp || !has_zpool)
1132		return NULL;
1133
1134	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
1135}
1136
1137static void zswap_pool_destroy(struct zswap_pool *pool)
1138{
1139	int i;
1140
1141	zswap_pool_debug("destroying", pool);
1142
1143	shrinker_free(pool->shrinker);
1144	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
1145	free_percpu(pool->acomp_ctx);
1146	list_lru_destroy(&pool->list_lru);
1147
1148	spin_lock(&zswap_pools_lock);
1149	mem_cgroup_iter_break(NULL, pool->next_shrink);
1150	pool->next_shrink = NULL;
1151	spin_unlock(&zswap_pools_lock);
1152
1153	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
1154		zpool_destroy_pool(pool->zpools[i]);
1155	kfree(pool);
1156}
1157
1158static int __must_check zswap_pool_get(struct zswap_pool *pool)
1159{
1160	if (!pool)
1161		return 0;
1162
1163	return kref_get_unless_zero(&pool->kref);
1164}
1165
1166static void __zswap_pool_release(struct work_struct *work)
1167{
1168	struct zswap_pool *pool = container_of(work, typeof(*pool),
1169						release_work);
1170
1171	synchronize_rcu();
1172
1173	/* nobody should have been able to get a kref... */
1174	WARN_ON(kref_get_unless_zero(&pool->kref));
1175
1176	/* pool is now off zswap_pools list and has no references. */
1177	zswap_pool_destroy(pool);
1178}
1179
1180static void __zswap_pool_empty(struct kref *kref)
1181{
1182	struct zswap_pool *pool;
1183
1184	pool = container_of(kref, typeof(*pool), kref);
1185
1186	spin_lock(&zswap_pools_lock);
1187
1188	WARN_ON(pool == zswap_pool_current());
1189
1190	list_del_rcu(&pool->list);
1191
1192	INIT_WORK(&pool->release_work, __zswap_pool_release);
1193	schedule_work(&pool->release_work);
1194
1195	spin_unlock(&zswap_pools_lock);
1196}
1197
1198static void zswap_pool_put(struct zswap_pool *pool)
1199{
1200	kref_put(&pool->kref, __zswap_pool_empty);
1201}
1202
1203/*********************************
1204* param callbacks
1205**********************************/
1206
1207static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
1208{
1209	/* no change required */
1210	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
1211		return false;
1212	return true;
1213}
1214
1215/* val must be a null-terminated string */
1216static int __zswap_param_set(const char *val, const struct kernel_param *kp,
1217			     char *type, char *compressor)
1218{
1219	struct zswap_pool *pool, *put_pool = NULL;
1220	char *s = strstrip((char *)val);
1221	int ret = 0;
1222	bool new_pool = false;
1223
1224	mutex_lock(&zswap_init_lock);
1225	switch (zswap_init_state) {
1226	case ZSWAP_UNINIT:
1227		/* if this is load-time (pre-init) param setting,
1228		 * don't create a pool; that's done during init.
1229		 */
1230		ret = param_set_charp(s, kp);
1231		break;
1232	case ZSWAP_INIT_SUCCEED:
1233		new_pool = zswap_pool_changed(s, kp);
1234		break;
1235	case ZSWAP_INIT_FAILED:
1236		pr_err("can't set param, initialization failed\n");
1237		ret = -ENODEV;
1238	}
1239	mutex_unlock(&zswap_init_lock);
1240
1241	/* no need to create a new pool, return directly */
1242	if (!new_pool)
1243		return ret;
1244
1245	if (!type) {
1246		if (!zpool_has_pool(s)) {
1247			pr_err("zpool %s not available\n", s);
1248			return -ENOENT;
1249		}
1250		type = s;
1251	} else if (!compressor) {
1252		if (!crypto_has_acomp(s, 0, 0)) {
1253			pr_err("compressor %s not available\n", s);
1254			return -ENOENT;
1255		}
1256		compressor = s;
1257	} else {
1258		WARN_ON(1);
1259		return -EINVAL;
1260	}
1261
1262	spin_lock(&zswap_pools_lock);
1263
1264	pool = zswap_pool_find_get(type, compressor);
1265	if (pool) {
1266		zswap_pool_debug("using existing", pool);
1267		WARN_ON(pool == zswap_pool_current());
1268		list_del_rcu(&pool->list);
1269	}
1270
1271	spin_unlock(&zswap_pools_lock);
1272
1273	if (!pool)
1274		pool = zswap_pool_create(type, compressor);
1275
1276	if (pool)
1277		ret = param_set_charp(s, kp);
1278	else
1279		ret = -EINVAL;
1280
1281	spin_lock(&zswap_pools_lock);
1282
1283	if (!ret) {
1284		put_pool = zswap_pool_current();
1285		list_add_rcu(&pool->list, &zswap_pools);
1286		zswap_has_pool = true;
1287	} else if (pool) {
1288		/* add the possibly pre-existing pool to the end of the pools
1289		 * list; if it's new (and empty) then it'll be removed and
1290		 * destroyed by the put after we drop the lock
1291		 */
1292		list_add_tail_rcu(&pool->list, &zswap_pools);
1293		put_pool = pool;
1294	}
1295
1296	spin_unlock(&zswap_pools_lock);
1297
1298	if (!zswap_has_pool && !pool) {
1299		/* if initial pool creation failed, and this pool creation also
1300		 * failed, maybe both compressor and zpool params were bad.
1301		 * Allow changing this param, so pool creation will succeed
1302		 * when the other param is changed. We already verified this
1303		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
1304		 * checks above.
1305		 */
1306		ret = param_set_charp(s, kp);
1307	}
1308
1309	/* drop the ref from either the old current pool,
1310	 * or the new pool we failed to add
1311	 */
1312	if (put_pool)
1313		zswap_pool_put(put_pool);
1314
1315	return ret;
1316}
1317
1318static int zswap_compressor_param_set(const char *val,
1319				      const struct kernel_param *kp)
1320{
1321	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
1322}
1323
1324static int zswap_zpool_param_set(const char *val,
1325				 const struct kernel_param *kp)
1326{
1327	return __zswap_param_set(val, kp, NULL, zswap_compressor);
1328}
1329
1330static int zswap_enabled_param_set(const char *val,
1331				   const struct kernel_param *kp)
1332{
1333	int ret = -ENODEV;
1334
1335	/* if this is load-time (pre-init) param setting, only set param. */
1336	if (system_state != SYSTEM_RUNNING)
1337		return param_set_bool(val, kp);
1338
1339	mutex_lock(&zswap_init_lock);
1340	switch (zswap_init_state) {
1341	case ZSWAP_UNINIT:
1342		if (zswap_setup())
1343			break;
1344		fallthrough;
1345	case ZSWAP_INIT_SUCCEED:
1346		if (!zswap_has_pool)
1347			pr_err("can't enable, no pool configured\n");
1348		else
1349			ret = param_set_bool(val, kp);
1350		break;
1351	case ZSWAP_INIT_FAILED:
1352		pr_err("can't enable, initialization failed\n");
1353	}
1354	mutex_unlock(&zswap_init_lock);
1355
1356	return ret;
1357}
1358
1359static void __zswap_load(struct zswap_entry *entry, struct page *page)
1360{
1361	struct zpool *zpool = zswap_find_zpool(entry);
1362	struct scatterlist input, output;
1363	struct crypto_acomp_ctx *acomp_ctx;
1364	u8 *src;
1365
1366	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1367	mutex_lock(&acomp_ctx->mutex);
1368
1369	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1370	if (!zpool_can_sleep_mapped(zpool)) {
1371		memcpy(acomp_ctx->buffer, src, entry->length);
1372		src = acomp_ctx->buffer;
1373		zpool_unmap_handle(zpool, entry->handle);
1374	}
1375
1376	sg_init_one(&input, src, entry->length);
1377	sg_init_table(&output, 1);
1378	sg_set_page(&output, page, PAGE_SIZE, 0);
1379	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1380	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1381	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1382	mutex_unlock(&acomp_ctx->mutex);
1383
1384	if (zpool_can_sleep_mapped(zpool))
1385		zpool_unmap_handle(zpool, entry->handle);
1386}
1387
1388/*********************************
1389* writeback code
1390**********************************/
1391/*
1392 * Attempts to free an entry by adding a folio to the swap cache,
1393 * decompressing the entry data into the folio, and issuing a
1394 * bio write to write the folio back to the swap device.
1395 *
1396 * This can be thought of as a "resumed writeback" of the folio
1397 * to the swap device.  We are basically resuming the same swap
1398 * writeback path that was intercepted with the zswap_store()
1399 * in the first place.  After the folio has been decompressed into
1400 * the swap cache, the compressed version stored by zswap can be
1401 * freed.
1402 */
1403static int zswap_writeback_entry(struct zswap_entry *entry,
1404				 struct zswap_tree *tree)
1405{
1406	swp_entry_t swpentry = entry->swpentry;
1407	struct folio *folio;
1408	struct mempolicy *mpol;
1409	bool folio_was_allocated;
1410	struct writeback_control wbc = {
1411		.sync_mode = WB_SYNC_NONE,
1412	};
1413
1414	/* try to allocate swap cache folio */
1415	mpol = get_task_policy(current);
1416	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1417				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1418	if (!folio)
1419		return -ENOMEM;
1420
1421	/*
1422	 * Found an existing folio, we raced with load/swapin. We generally
1423	 * writeback cold folios from zswap, and swapin means the folio just
1424	 * became hot. Skip this folio and let the caller find another one.
1425	 */
1426	if (!folio_was_allocated) {
1427		folio_put(folio);
1428		return -EEXIST;
1429	}
1430
1431	/*
1432	 * folio is locked, and the swapcache is now secured against
1433	 * concurrent swapping to and from the slot. Verify that the
1434	 * swap entry hasn't been invalidated and recycled behind our
1435	 * backs (our zswap_entry reference doesn't prevent that), to
1436	 * avoid overwriting a new swap folio with old compressed data.
1437	 */
1438	spin_lock(&tree->lock);
1439	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
1440		spin_unlock(&tree->lock);
1441		delete_from_swap_cache(folio);
1442		folio_unlock(folio);
1443		folio_put(folio);
1444		return -ENOMEM;
1445	}
1446	spin_unlock(&tree->lock);
1447
1448	__zswap_load(entry, &folio->page);
1449
1450	/* folio is up to date */
1451	folio_mark_uptodate(folio);
1452
1453	/* move it to the tail of the inactive list after end_writeback */
1454	folio_set_reclaim(folio);
1455
1456	/* start writeback */
1457	__swap_writepage(folio, &wbc);
1458	folio_put(folio);
1459
1460	return 0;
1461}
1462
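     /*
      * A page is "same-filled" if every machine word in it holds the same
      * value (a zeroed page being the most common case); such pages are
      * stored as just that value rather than as compressed data.
      */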
1463static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1464{
1465	unsigned long *page;
1466	unsigned long val;
1467	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1468
1469	page = (unsigned long *)ptr;
1470	val = page[0];
1471
1472	if (val != page[last_pos])
1473		return 0;
1474
1475	for (pos = 1; pos < last_pos; pos++) {
1476		if (val != page[pos])
1477			return 0;
1478	}
1479
1480	*value = val;
1481
1482	return 1;
1483}
1484
1485static void zswap_fill_page(void *ptr, unsigned long value)
1486{
1487	unsigned long *page;
1488
1489	page = (unsigned long *)ptr;
1490	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1491}
1492
1493bool zswap_store(struct folio *folio)
1494{
1495	swp_entry_t swp = folio->swap;
1496	int type = swp_type(swp);
1497	pgoff_t offset = swp_offset(swp);
1498	struct page *page = &folio->page;
1499	struct zswap_tree *tree = zswap_trees[type];
1500	struct zswap_entry *entry, *dupentry;
1501	struct scatterlist input, output;
1502	struct crypto_acomp_ctx *acomp_ctx;
1503	struct obj_cgroup *objcg = NULL;
1504	struct mem_cgroup *memcg = NULL;
1505	struct zswap_pool *pool;
1506	struct zpool *zpool;
1507	unsigned int dlen = PAGE_SIZE;
1508	unsigned long handle, value;
1509	char *buf;
1510	u8 *src, *dst;
1511	gfp_t gfp;
1512	int ret;
1513
1514	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1515	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1516
1517	/* Large folios aren't supported */
1518	if (folio_test_large(folio))
1519		return false;
1520
1521	if (!tree)
1522		return false;
1523
1524	/*
1525	 * If this is a duplicate, it must be removed before attempting to store
 1526	 * it; otherwise, if the store fails, the old page won't be removed from
 1527	 * the tree, and it might be written back, overwriting the new data.
1528	 */
1529	spin_lock(&tree->lock);
1530	dupentry = zswap_rb_search(&tree->rbroot, offset);
1531	if (dupentry) {
1532		zswap_duplicate_entry++;
1533		zswap_invalidate_entry(tree, dupentry);
1534	}
1535	spin_unlock(&tree->lock);
1536
1537	if (!zswap_enabled)
1538		return false;
1539
1540	objcg = get_obj_cgroup_from_folio(folio);
1541	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1542		memcg = get_mem_cgroup_from_objcg(objcg);
1543		if (shrink_memcg(memcg)) {
1544			mem_cgroup_put(memcg);
1545			goto reject;
1546		}
1547		mem_cgroup_put(memcg);
1548	}
1549
1550	/* reclaim space if needed */
1551	if (zswap_is_full()) {
1552		zswap_pool_limit_hit++;
1553		zswap_pool_reached_full = true;
1554		goto shrink;
1555	}
1556
1557	if (zswap_pool_reached_full) {
 1558		if (!zswap_can_accept())
1559			goto shrink;
1560		else
1561			zswap_pool_reached_full = false;
1562	}
1563
1564	/* allocate entry */
1565	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
1566	if (!entry) {
1567		zswap_reject_kmemcache_fail++;
1568		goto reject;
1569	}
1570
1571	if (zswap_same_filled_pages_enabled) {
1572		src = kmap_local_page(page);
1573		if (zswap_is_page_same_filled(src, &value)) {
1574			kunmap_local(src);
1575			entry->swpentry = swp_entry(type, offset);
1576			entry->length = 0;
1577			entry->value = value;
1578			atomic_inc(&zswap_same_filled_pages);
1579			goto insert_entry;
1580		}
1581		kunmap_local(src);
1582	}
1583
1584	if (!zswap_non_same_filled_pages_enabled)
1585		goto freepage;
1586
1587	/* if entry is successfully added, it keeps the reference */
1588	entry->pool = zswap_pool_current_get();
1589	if (!entry->pool)
1590		goto freepage;
1591
1592	if (objcg) {
1593		memcg = get_mem_cgroup_from_objcg(objcg);
1594		if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1595			mem_cgroup_put(memcg);
1596			goto put_pool;
1597		}
1598		mem_cgroup_put(memcg);
1599	}
1600
1601	/* compress */
1602	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1603
1604	mutex_lock(&acomp_ctx->mutex);
1605
1606	dst = acomp_ctx->buffer;
1607	sg_init_table(&input, 1);
1608	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1609
1610	/*
 1611	 * We need PAGE_SIZE * 2 here since there may be an over-compression case,
 1612	 * and hardware accelerators may not check the dst buffer size, so give
 1613	 * the dst buffer enough length to avoid buffer overflow.
1614	 */
1615	sg_init_one(&output, dst, PAGE_SIZE * 2);
1616	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1617	/*
 1618	 * It may look a little silly that we send an asynchronous request and
 1619	 * then wait for its completion synchronously; in effect the process is
 1620	 * synchronous.
 1621	 * Theoretically, acomp lets users submit multiple requests to one acomp
 1622	 * instance and have them completed simultaneously, but zswap stores and
 1623	 * loads page by page, so there is no existing way for a single thread
 1624	 * doing zswap to send a second page before the first page is done.
 1625	 * However, different threads running on different CPUs use different
 1626	 * acomp instances, so multiple threads can still do (de)compression in
 1627	 * parallel.
1628	 */
1629	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1630	dlen = acomp_ctx->req->dlen;
1631
1632	if (ret) {
1633		zswap_reject_compress_fail++;
1634		goto put_dstmem;
1635	}
1636
1637	/* store */
1638	zpool = zswap_find_zpool(entry);
1639	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1640	if (zpool_malloc_support_movable(zpool))
1641		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1642	ret = zpool_malloc(zpool, dlen, gfp, &handle);
1643	if (ret == -ENOSPC) {
1644		zswap_reject_compress_poor++;
1645		goto put_dstmem;
1646	}
1647	if (ret) {
1648		zswap_reject_alloc_fail++;
1649		goto put_dstmem;
1650	}
1651	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1652	memcpy(buf, dst, dlen);
1653	zpool_unmap_handle(zpool, handle);
1654	mutex_unlock(&acomp_ctx->mutex);
1655
1656	/* populate entry */
1657	entry->swpentry = swp_entry(type, offset);
1658	entry->handle = handle;
1659	entry->length = dlen;
1660
1661insert_entry:
1662	entry->objcg = objcg;
1663	if (objcg) {
1664		obj_cgroup_charge_zswap(objcg, entry->length);
1665		/* Account before objcg ref is moved to tree */
1666		count_objcg_event(objcg, ZSWPOUT);
1667	}
1668
1669	/* map */
1670	spin_lock(&tree->lock);
1671	/*
1672	 * A duplicate entry should have been removed at the beginning of this
1673	 * function. Since the swap entry should be pinned, if a duplicate is
1674	 * found again here it means that something went wrong in the swap
1675	 * cache.
1676	 */
1677	while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1678		WARN_ON(1);
1679		zswap_duplicate_entry++;
1680		zswap_invalidate_entry(tree, dupentry);
1681	}
1682	if (entry->length) {
1683		INIT_LIST_HEAD(&entry->lru);
1684		zswap_lru_add(&entry->pool->list_lru, entry);
1685		atomic_inc(&entry->pool->nr_stored);
1686	}
1687	spin_unlock(&tree->lock);
1688
1689	/* update stats */
1690	atomic_inc(&zswap_stored_pages);
1691	zswap_update_total_size();
1692	count_vm_event(ZSWPOUT);
1693
1694	return true;
1695
1696put_dstmem:
1697	mutex_unlock(&acomp_ctx->mutex);
1698put_pool:
1699	zswap_pool_put(entry->pool);
1700freepage:
1701	zswap_entry_cache_free(entry);
1702reject:
1703	if (objcg)
1704		obj_cgroup_put(objcg);
1705	return false;
1706
1707shrink:
1708	pool = zswap_pool_last_get();
1709	if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1710		zswap_pool_put(pool);
1711	goto reject;
1712}
1713
1714bool zswap_load(struct folio *folio)
1715{
1716	swp_entry_t swp = folio->swap;
1717	int type = swp_type(swp);
1718	pgoff_t offset = swp_offset(swp);
1719	struct page *page = &folio->page;
1720	struct zswap_tree *tree = zswap_trees[type];
1721	struct zswap_entry *entry;
1722	u8 *dst;
1723
1724	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1725
1726	/* find */
1727	spin_lock(&tree->lock);
1728	entry = zswap_entry_find_get(&tree->rbroot, offset);
1729	if (!entry) {
1730		spin_unlock(&tree->lock);
1731		return false;
1732	}
1733	spin_unlock(&tree->lock);
1734
1735	if (entry->length)
1736		__zswap_load(entry, page);
1737	else {
1738		dst = kmap_local_page(page);
1739		zswap_fill_page(dst, entry->value);
1740		kunmap_local(dst);
1741	}
1742
1743	count_vm_event(ZSWPIN);
1744	if (entry->objcg)
1745		count_objcg_event(entry->objcg, ZSWPIN);
1746
1747	spin_lock(&tree->lock);
1748	if (zswap_exclusive_loads_enabled) {
1749		zswap_invalidate_entry(tree, entry);
1750		folio_mark_dirty(folio);
1751	} else if (entry->length) {
1752		zswap_lru_del(&entry->pool->list_lru, entry);
1753		zswap_lru_add(&entry->pool->list_lru, entry);
1754	}
1755	zswap_entry_put(tree, entry);
1756	spin_unlock(&tree->lock);
1757
1758	return true;
1759}
1760
1761void zswap_invalidate(int type, pgoff_t offset)
1762{
1763	struct zswap_tree *tree = zswap_trees[type];
1764	struct zswap_entry *entry;
1765
1766	/* find */
1767	spin_lock(&tree->lock);
1768	entry = zswap_rb_search(&tree->rbroot, offset);
1769	if (!entry) {
1770		/* entry was written back */
1771		spin_unlock(&tree->lock);
1772		return;
1773	}
1774	zswap_invalidate_entry(tree, entry);
1775	spin_unlock(&tree->lock);
1776}
1777
1778void zswap_swapon(int type)
1779{
1780	struct zswap_tree *tree;
1781
1782	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1783	if (!tree) {
1784		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1785		return;
1786	}
1787
1788	tree->rbroot = RB_ROOT;
1789	spin_lock_init(&tree->lock);
1790	zswap_trees[type] = tree;
1791}
1792
1793void zswap_swapoff(int type)
1794{
1795	struct zswap_tree *tree = zswap_trees[type];
1796	struct zswap_entry *entry, *n;
1797
1798	if (!tree)
1799		return;
1800
1801	/* walk the tree and free everything */
1802	spin_lock(&tree->lock);
1803	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1804		zswap_free_entry(entry);
1805	tree->rbroot = RB_ROOT;
1806	spin_unlock(&tree->lock);
1807	kfree(tree);
1808	zswap_trees[type] = NULL;
1809}
1810
1811/*********************************
1812* debugfs functions
1813**********************************/
1814#ifdef CONFIG_DEBUG_FS
1815#include <linux/debugfs.h>
1816
1817static struct dentry *zswap_debugfs_root;
1818
1819static int zswap_debugfs_init(void)
1820{
1821	if (!debugfs_initialized())
1822		return -ENODEV;
1823
1824	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1825
1826	debugfs_create_u64("pool_limit_hit", 0444,
1827			   zswap_debugfs_root, &zswap_pool_limit_hit);
1828	debugfs_create_u64("reject_reclaim_fail", 0444,
1829			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1830	debugfs_create_u64("reject_alloc_fail", 0444,
1831			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1832	debugfs_create_u64("reject_kmemcache_fail", 0444,
1833			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1834	debugfs_create_u64("reject_compress_fail", 0444,
1835			   zswap_debugfs_root, &zswap_reject_compress_fail);
1836	debugfs_create_u64("reject_compress_poor", 0444,
1837			   zswap_debugfs_root, &zswap_reject_compress_poor);
1838	debugfs_create_u64("written_back_pages", 0444,
1839			   zswap_debugfs_root, &zswap_written_back_pages);
1840	debugfs_create_u64("duplicate_entry", 0444,
1841			   zswap_debugfs_root, &zswap_duplicate_entry);
1842	debugfs_create_u64("pool_total_size", 0444,
1843			   zswap_debugfs_root, &zswap_pool_total_size);
1844	debugfs_create_atomic_t("stored_pages", 0444,
1845				zswap_debugfs_root, &zswap_stored_pages);
1846	debugfs_create_atomic_t("same_filled_pages", 0444,
1847				zswap_debugfs_root, &zswap_same_filled_pages);
1848
1849	return 0;
1850}
1851#else
1852static int zswap_debugfs_init(void)
1853{
1854	return 0;
1855}
1856#endif
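These counters are exported read-only; with CONFIG_DEBUG_FS enabled they appear as files in the "zswap" directory created by zswap_debugfs_init(), conventionally under /sys/kernel/debug/zswap/ once debugfs is mounted. A minimal user-space sketch for sampling a few of them (illustrative only, not part of zswap.c; it assumes the standard mount point and needs root):

/* zswap_stats.c - illustrative reader for the debugfs counters above */
#include <stdio.h>

static long long read_counter(const char *name)
{
	char path[256];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/zswap/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("pool_total_size: %lld\n", read_counter("pool_total_size"));
	printf("stored_pages:    %lld\n", read_counter("stored_pages"));
	printf("pool_limit_hit:  %lld\n", read_counter("pool_limit_hit"));
	return 0;
}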
1857
1858/*********************************
1859* module init and exit
1860**********************************/
1861static int zswap_setup(void)
1862{
1863	struct zswap_pool *pool;
1864	int ret;
1865
1866	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1867	if (!zswap_entry_cache) {
1868		pr_err("entry cache creation failed\n");
1869		goto cache_fail;
1870	}
1871
1872	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1873				      "mm/zswap_pool:prepare",
1874				      zswap_cpu_comp_prepare,
1875				      zswap_cpu_comp_dead);
1876	if (ret)
1877		goto hp_fail;
1878
1879	pool = __zswap_pool_create_fallback();
1880	if (pool) {
1881		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1882			zpool_get_type(pool->zpools[0]));
1883		list_add(&pool->list, &zswap_pools);
1884		zswap_has_pool = true;
1885	} else {
1886		pr_err("pool creation failed\n");
1887		zswap_enabled = false;
1888	}
1889
1890	shrink_wq = create_workqueue("zswap-shrink");
1891	if (!shrink_wq)
1892		goto fallback_fail;
1893
1894	if (zswap_debugfs_init())
1895		pr_warn("debugfs initialization failed\n");
1896	zswap_init_state = ZSWAP_INIT_SUCCEED;
1897	return 0;
1898
1899fallback_fail:
1900	if (pool)
1901		zswap_pool_destroy(pool);
1902hp_fail:
1903	kmem_cache_destroy(zswap_entry_cache);
1904cache_fail:
1905	/* if built-in, we aren't unloaded on failure; don't allow use */
1906	zswap_init_state = ZSWAP_INIT_FAILED;
1907	zswap_enabled = false;
1908	return -ENOMEM;
1909}
1910
1911static int __init zswap_init(void)
1912{
1913	if (!zswap_enabled)
1914		return 0;
1915	return zswap_setup();
1916}
1917/* must be late so crypto has time to come up */
1918late_initcall(zswap_init);
1919
1920MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1921MODULE_DESCRIPTION("Compressed cache for swap pages");
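Since zswap can be built in and switched on after boot, the usual way to exercise this init path on a running system is through its module parameters. A small user-space sketch (illustrative only, not kernel code; it assumes the parameters are exposed read-write under /sys/module/zswap/parameters/, the normal module_param behaviour, and it needs root):

/* zswap_enable.c - illustrative: flip the "enabled" parameter from user space */
#include <stdio.h>

static int zswap_param_write(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/module/zswap/parameters/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	if (fputs(val, f) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	if (zswap_param_write("enabled", "Y"))
		perror("could not enable zswap");
	return 0;
}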
v4.6
 
   1/*
   2 * zswap.c - zswap driver file
   3 *
   4 * zswap is a backend for frontswap that takes pages that are in the process
   5 * of being swapped out and attempts to compress and store them in a
   6 * RAM-based memory pool.  This can result in a significant I/O reduction on
   7 * the swap device and, in the case where decompressing from RAM is faster
   8 * than reading from the swap device, can also improve workload performance.
   9 *
  10 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  11 *
  12 * This program is free software; you can redistribute it and/or
  13 * modify it under the terms of the GNU General Public License
  14 * as published by the Free Software Foundation; either version 2
  15 * of the License, or (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful,
  18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  20 * GNU General Public License for more details.
  21*/
  22
  23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24
  25#include <linux/module.h>
  26#include <linux/cpu.h>
  27#include <linux/highmem.h>
  28#include <linux/slab.h>
  29#include <linux/spinlock.h>
  30#include <linux/types.h>
  31#include <linux/atomic.h>
  32#include <linux/frontswap.h>
  33#include <linux/rbtree.h>
  34#include <linux/swap.h>
  35#include <linux/crypto.h>
 
 
  36#include <linux/mempool.h>
  37#include <linux/zpool.h>
  38
 
  39#include <linux/mm_types.h>
  40#include <linux/page-flags.h>
  41#include <linux/swapops.h>
  42#include <linux/writeback.h>
  43#include <linux/pagemap.h>
  44
  45/*********************************
  46* statistics
  47**********************************/
  48/* Total bytes used by the compressed storage */
  49static u64 zswap_pool_total_size;
  50/* The number of compressed pages currently stored in zswap */
  51static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 
 
  52
  53/*
  54 * The statistics below are not protected from concurrent access for
  55 * performance reasons so they may not be 100% accurate.  However,
  56 * they do provide useful information on roughly how many times a
  57 * certain event is occurring.
  58*/
  59
  60/* Pool limit was hit (see zswap_max_pool_percent) */
  61static u64 zswap_pool_limit_hit;
  62/* Pages written back when pool limit was reached */
  63static u64 zswap_written_back_pages;
  64/* Store failed due to a reclaim failure after pool limit was reached */
  65static u64 zswap_reject_reclaim_fail;
 
 
  66/* Compressed page was too big for the allocator to (optimally) store */
  67static u64 zswap_reject_compress_poor;
  68/* Store failed because underlying allocator could not get memory */
  69static u64 zswap_reject_alloc_fail;
  70/* Store failed because the entry metadata could not be allocated (rare) */
  71static u64 zswap_reject_kmemcache_fail;
  72/* Duplicate store was encountered (rare) */
  73static u64 zswap_duplicate_entry;
  74
  75/*********************************
  76* tunables
  77**********************************/
  78
  79/* Enable/disable zswap (disabled by default) */
  80static bool zswap_enabled;
  81module_param_named(enabled, zswap_enabled, bool, 0644);
  82
  83/* Crypto compressor to use */
  84#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
  85static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
  86static int zswap_compressor_param_set(const char *,
  87				      const struct kernel_param *);
  88static struct kernel_param_ops zswap_compressor_param_ops = {
  89	.set =		zswap_compressor_param_set,
  90	.get =		param_get_charp,
  91	.free =		param_free_charp,
  92};
  93module_param_cb(compressor, &zswap_compressor_param_ops,
  94		&zswap_compressor, 0644);
  95
  96/* Compressed storage zpool to use */
  97#define ZSWAP_ZPOOL_DEFAULT "zbud"
  98static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
  99static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 100static struct kernel_param_ops zswap_zpool_param_ops = {
 101	.set =		zswap_zpool_param_set,
 102	.get =		param_get_charp,
 103	.free =		param_free_charp,
 104};
 105module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 106
 107/* The maximum percentage of memory that the compressed pool can occupy */
 108static unsigned int zswap_max_pool_percent = 20;
 109module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 111/*********************************
 112* data structures
 113**********************************/
 114
 115struct zswap_pool {
 116	struct zpool *zpool;
 117	struct crypto_comp * __percpu *tfm;
 118	struct kref kref;
 119	struct list_head list;
 120	struct rcu_head rcu_head;
 121	struct notifier_block notifier;
 
 122	char tfm_name[CRYPTO_MAX_ALG_NAME];
 123};
 124
 125/*
 126 * struct zswap_entry
 127 *
 128 * This structure contains the metadata for tracking a single compressed
 129 * page within zswap.
 130 *
 131 * rbnode - links the entry into red-black tree for the appropriate swap type
 132 * offset - the swap offset for the entry.  Index into the red-black tree.
 133 * refcount - the number of outstanding references to the entry. This is needed
 134 *            to protect against premature freeing of the entry by
 135 *            concurrent calls to load, invalidate, and writeback.  The lock
 136 *            for the zswap_tree structure that contains the entry must
 137 *            be held while changing the refcount.  Since the lock must
 138 *            be held, there is no reason to also make refcount atomic.
 139 * length - the length in bytes of the compressed page data.  Needed during
 140 *          decompression
 
 141 * pool - the zswap_pool the entry's data is in
 142 * handle - zpool allocation handle that stores the compressed page data
 143 */
 144struct zswap_entry {
 145	struct rb_node rbnode;
 146	pgoff_t offset;
 147	int refcount;
 148	unsigned int length;
 149	struct zswap_pool *pool;
 150	unsigned long handle;
 151};
 152
 153struct zswap_header {
 154	swp_entry_t swpentry;
 
 155};
 156
 157/*
 158 * The tree lock in the zswap_tree struct protects a few things:
 159 * - the rbtree
 160 * - the refcount field of each entry in the tree
 161 */
 162struct zswap_tree {
 163	struct rb_root rbroot;
 164	spinlock_t lock;
 165};
 166
 167static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 168
 169/* RCU-protected iteration */
 170static LIST_HEAD(zswap_pools);
 171/* protects zswap_pools list modification */
 172static DEFINE_SPINLOCK(zswap_pools_lock);
 173/* pool counter to provide unique names to zpool */
 174static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 175
 176/* used by param callback function */
 177static bool zswap_init_started;
 178
 179/*********************************
 180* helpers and fwd declarations
 181**********************************/
 182
 183#define zswap_pool_debug(msg, p)				\
 184	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 185		 zpool_get_type((p)->zpool))
 186
 187static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
 
 188static int zswap_pool_get(struct zswap_pool *pool);
 189static void zswap_pool_put(struct zswap_pool *pool);
 190
 191static const struct zpool_ops zswap_zpool_ops = {
 192	.evict = zswap_writeback_entry
 193};
 
 
 194
 195static bool zswap_is_full(void)
 196{
 197	return totalram_pages * zswap_max_pool_percent / 100 <
 198		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 199}
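zswap_is_full() reports "full" once the compressed pool, rounded up to whole pages, exceeds max_pool_percent of total RAM. A self-contained sketch of the same arithmetic with made-up numbers (user-space C, not kernel code; the 4 KiB page size, 8 GiB of RAM and the 20% default are assumptions for the example):

/* Illustrative only: mirrors the zswap_is_full() comparison above. */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ULL

static bool is_full(unsigned long long totalram_pages,
		    unsigned int max_pool_percent,
		    unsigned long long pool_total_size)
{
	/* DIV_ROUND_UP(pool_total_size, PAGE_SIZE) */
	unsigned long long pool_pages =
		(pool_total_size + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;

	return totalram_pages * max_pool_percent / 100 < pool_pages;
}

int main(void)
{
	/* 8 GiB of RAM is 2,097,152 4 KiB pages; 20% of that is 419,430 pages. */
	printf("%d\n", is_full(2097152, 20, 419430 * EXAMPLE_PAGE_SIZE)); /* 0: at the limit */
	printf("%d\n", is_full(2097152, 20, 419431 * EXAMPLE_PAGE_SIZE)); /* 1: over the limit */
	return 0;
}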
 200
 201static void zswap_update_total_size(void)
 202{
 203	struct zswap_pool *pool;
 204	u64 total = 0;
 205
 206	rcu_read_lock();
 207
 208	list_for_each_entry_rcu(pool, &zswap_pools, list)
 209		total += zpool_get_total_size(pool->zpool);
 210
 211	rcu_read_unlock();
 212
 213	zswap_pool_total_size = total;
 214}
 215
 216/*********************************
 217* zswap entry functions
 218**********************************/
 219static struct kmem_cache *zswap_entry_cache;
 220
 221static int __init zswap_entry_cache_create(void)
 222{
 223	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
 224	return zswap_entry_cache == NULL;
 225}
 226
 227static void __init zswap_entry_cache_destroy(void)
 228{
 229	kmem_cache_destroy(zswap_entry_cache);
 230}
 231
 232static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
 233{
 234	struct zswap_entry *entry;
 235	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
 236	if (!entry)
 237		return NULL;
 238	entry->refcount = 1;
 239	RB_CLEAR_NODE(&entry->rbnode);
 240	return entry;
 241}
 242
 243static void zswap_entry_cache_free(struct zswap_entry *entry)
 244{
 245	kmem_cache_free(zswap_entry_cache, entry);
 246}
 247
 248/*********************************
 249* rbtree functions
 250**********************************/
 251static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 252{
 253	struct rb_node *node = root->rb_node;
 254	struct zswap_entry *entry;
 
 255
 256	while (node) {
 257		entry = rb_entry(node, struct zswap_entry, rbnode);
 258		if (entry->offset > offset)
 
 259			node = node->rb_left;
 260		else if (entry->offset < offset)
 261			node = node->rb_right;
 262		else
 263			return entry;
 264	}
 265	return NULL;
 266}
 267
 268/*
 269 * In the case that an entry with the same offset is found, a pointer to
 270 * the existing entry is stored in dupentry and the function returns -EEXIST
 271 */
 272static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 273			struct zswap_entry **dupentry)
 274{
 275	struct rb_node **link = &root->rb_node, *parent = NULL;
 276	struct zswap_entry *myentry;
 
 277
 278	while (*link) {
 279		parent = *link;
 280		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 281		if (myentry->offset > entry->offset)
 
 282			link = &(*link)->rb_left;
 283		else if (myentry->offset < entry->offset)
 284			link = &(*link)->rb_right;
 285		else {
 286			*dupentry = myentry;
 287			return -EEXIST;
 288		}
 289	}
 290	rb_link_node(&entry->rbnode, parent, link);
 291	rb_insert_color(&entry->rbnode, root);
 292	return 0;
 293}
 294
 295static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 296{
 297	if (!RB_EMPTY_NODE(&entry->rbnode)) {
 298		rb_erase(&entry->rbnode, root);
 299		RB_CLEAR_NODE(&entry->rbnode);
 
 300	}
 301}
 302
 303/*
 304 * Carries out the common pattern of freeing an entry's zpool allocation,
 305 * freeing the entry itself, and decrementing the number of stored pages.
 306 */
 307static void zswap_free_entry(struct zswap_entry *entry)
 308{
 309	zpool_free(entry->pool->zpool, entry->handle);
 310	zswap_pool_put(entry->pool);
 311	zswap_entry_cache_free(entry);
 312	atomic_dec(&zswap_stored_pages);
 313	zswap_update_total_size();
 314}
 315
 316/* caller must hold the tree lock */
 317static void zswap_entry_get(struct zswap_entry *entry)
 318{
 319	entry->refcount++;
 320}
 321
 322/* caller must hold the tree lock
 323* remove from the tree and free it, if nobody references the entry
 324*/
 325static void zswap_entry_put(struct zswap_tree *tree,
 326			struct zswap_entry *entry)
 327{
 328	int refcount = --entry->refcount;
 329
 330	BUG_ON(refcount < 0);
 331	if (refcount == 0) {
 332		zswap_rb_erase(&tree->rbroot, entry);
 333		zswap_free_entry(entry);
 334	}
 335}
 336
 337/* caller must hold the tree lock */
 338static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 339				pgoff_t offset)
 340{
 341	struct zswap_entry *entry;
 342
 343	entry = zswap_rb_search(root, offset);
 344	if (entry)
 345		zswap_entry_get(entry);
 346
 347	return entry;
 348}
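Taken together with the refcount rules documented on struct zswap_entry, the helpers above give the lookup pattern the rest of this file relies on: take a reference under the tree lock, drop the lock while using the entry, then put the reference under the lock again. A condensed sketch of that pattern (illustrative only, written against the helpers above; example_lookup() and the use_entry_data() placeholder are not real kernel functions):

/* Illustrative: the find_get/put protocol as used by the load, invalidate
 * and writeback paths later in this file. */
static void example_lookup(struct zswap_tree *tree, pgoff_t offset)
{
	struct zswap_entry *entry;

	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);	/* takes a ref */
	spin_unlock(&tree->lock);
	if (!entry)
		return;

	/*
	 * Safe to touch the entry here: our reference keeps it alive even if
	 * a concurrent invalidate removes it from the tree and drops the
	 * initial reference.
	 */
	/* use_entry_data(entry); */

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);	/* frees the entry on the last ref */
	spin_unlock(&tree->lock);
}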
 349
 350/*********************************
 351* per-cpu code
 352**********************************/
 353static DEFINE_PER_CPU(u8 *, zswap_dstmem);
 
 354
 355static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
 
 356{
 357	u8 *dst;
 358
 359	switch (action) {
 360	case CPU_UP_PREPARE:
 361		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 362		if (!dst) {
 363			pr_err("can't allocate compressor buffer\n");
 364			return NOTIFY_BAD;
 365		}
 366		per_cpu(zswap_dstmem, cpu) = dst;
 367		break;
 368	case CPU_DEAD:
 369	case CPU_UP_CANCELED:
 370		dst = per_cpu(zswap_dstmem, cpu);
 371		kfree(dst);
 372		per_cpu(zswap_dstmem, cpu) = NULL;
 373		break;
 374	default:
 375		break;
 376	}
 377	return NOTIFY_OK;
 378}
 379
 380static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
 381				     unsigned long action, void *pcpu)
 382{
 383	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
 384}
 385
 386static struct notifier_block zswap_dstmem_notifier = {
 387	.notifier_call =	zswap_cpu_dstmem_notifier,
 388};
 389
 390static int __init zswap_cpu_dstmem_init(void)
 391{
 392	unsigned long cpu;
 393
 394	cpu_notifier_register_begin();
 395	for_each_online_cpu(cpu)
 396		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
 397		    NOTIFY_BAD)
 398			goto cleanup;
 399	__register_cpu_notifier(&zswap_dstmem_notifier);
 400	cpu_notifier_register_done();
 401	return 0;
 402
 403cleanup:
 404	for_each_online_cpu(cpu)
 405		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
 406	cpu_notifier_register_done();
 407	return -ENOMEM;
 
 
 408}
 409
 410static void zswap_cpu_dstmem_destroy(void)
 411{
 412	unsigned long cpu;
 
 
 
 413
 414	cpu_notifier_register_begin();
 415	for_each_online_cpu(cpu)
 416		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
 417	__unregister_cpu_notifier(&zswap_dstmem_notifier);
 418	cpu_notifier_register_done();
 419}
 420
 421static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
 422				     unsigned long action, unsigned long cpu)
 
 
 423{
 424	struct crypto_comp *tfm;
 425
 426	switch (action) {
 427	case CPU_UP_PREPARE:
 428		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
 429			break;
 430		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
 431		if (IS_ERR_OR_NULL(tfm)) {
 432			pr_err("could not alloc crypto comp %s : %ld\n",
 433			       pool->tfm_name, PTR_ERR(tfm));
 434			return NOTIFY_BAD;
 435		}
 436		*per_cpu_ptr(pool->tfm, cpu) = tfm;
 437		break;
 438	case CPU_DEAD:
 439	case CPU_UP_CANCELED:
 440		tfm = *per_cpu_ptr(pool->tfm, cpu);
 441		if (!IS_ERR_OR_NULL(tfm))
 442			crypto_free_comp(tfm);
 443		*per_cpu_ptr(pool->tfm, cpu) = NULL;
 444		break;
 445	default:
 446		break;
 447	}
 448	return NOTIFY_OK;
 449}
 450
 451static int zswap_cpu_comp_notifier(struct notifier_block *nb,
 452				   unsigned long action, void *pcpu)
 453{
 454	unsigned long cpu = (unsigned long)pcpu;
 455	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
 456
 457	return __zswap_cpu_comp_notifier(pool, action, cpu);
 458}
 459
 460static int zswap_cpu_comp_init(struct zswap_pool *pool)
 461{
 462	unsigned long cpu;
 463
 464	memset(&pool->notifier, 0, sizeof(pool->notifier));
 465	pool->notifier.notifier_call = zswap_cpu_comp_notifier;
 466
 467	cpu_notifier_register_begin();
 468	for_each_online_cpu(cpu)
 469		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
 470		    NOTIFY_BAD)
 471			goto cleanup;
 472	__register_cpu_notifier(&pool->notifier);
 473	cpu_notifier_register_done();
 474	return 0;
 475
 476cleanup:
 477	for_each_online_cpu(cpu)
 478		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
 479	cpu_notifier_register_done();
 480	return -ENOMEM;
 481}
 482
 483static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
 484{
 485	unsigned long cpu;
 486
 487	cpu_notifier_register_begin();
 488	for_each_online_cpu(cpu)
 489		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
 490	__unregister_cpu_notifier(&pool->notifier);
 491	cpu_notifier_register_done();
 492}
 493
 494/*********************************
 495* pool functions
 496**********************************/
 497
 498static struct zswap_pool *__zswap_pool_current(void)
 499{
 500	struct zswap_pool *pool;
 501
 502	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 503	WARN_ON(!pool);
 
 504
 505	return pool;
 506}
 507
 508static struct zswap_pool *zswap_pool_current(void)
 509{
 510	assert_spin_locked(&zswap_pools_lock);
 511
 512	return __zswap_pool_current();
 513}
 514
 515static struct zswap_pool *zswap_pool_current_get(void)
 516{
 517	struct zswap_pool *pool;
 518
 519	rcu_read_lock();
 520
 521	pool = __zswap_pool_current();
 522	if (!pool || !zswap_pool_get(pool))
 523		pool = NULL;
 524
 525	rcu_read_unlock();
 526
 527	return pool;
 528}
 529
 530static struct zswap_pool *zswap_pool_last_get(void)
 531{
 532	struct zswap_pool *pool, *last = NULL;
 533
 534	rcu_read_lock();
 535
 536	list_for_each_entry_rcu(pool, &zswap_pools, list)
 537		last = pool;
 538	if (!WARN_ON(!last) && !zswap_pool_get(last))
 
 
 539		last = NULL;
 540
 541	rcu_read_unlock();
 542
 543	return last;
 544}
 545
 546/* type and compressor must be null-terminated */
 547static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 548{
 549	struct zswap_pool *pool;
 550
 551	assert_spin_locked(&zswap_pools_lock);
 552
 553	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 554		if (strcmp(pool->tfm_name, compressor))
 555			continue;
 556		if (strcmp(zpool_get_type(pool->zpool), type))
 
 557			continue;
 558		/* if we can't get it, it's about to be destroyed */
 559		if (!zswap_pool_get(pool))
 560			continue;
 561		return pool;
 562	}
 563
 564	return NULL;
 565}
 566
 567static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 568{
 
 569	struct zswap_pool *pool;
 570	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 571	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 572
 573	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 574	if (!pool) {
 575		pr_err("pool alloc failed\n");
 576		return NULL;
 577	}
 
 578
 579	/* unique name for each pool specifically required by zsmalloc */
 580	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
 581
 582	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 583	if (!pool->zpool) {
 584		pr_err("%s zpool not available\n", type);
 585		goto error;
 586	}
 587	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
 588
 589	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 590	pool->tfm = alloc_percpu(struct crypto_comp *);
 591	if (!pool->tfm) {
 592		pr_err("percpu alloc failed\n");
 593		goto error;
 594	}
 595
 596	if (zswap_cpu_comp_init(pool))
 
 597		goto error;
 
 598	pr_debug("using %s compressor\n", pool->tfm_name);
 599
 600	/* being the current pool takes 1 ref; this func expects the
 601	 * caller to always add the new pool as the current pool
 602	 */
 603	kref_init(&pool->kref);
 604	INIT_LIST_HEAD(&pool->list);
 605
 606	zswap_pool_debug("created", pool);
 607
 608	return pool;
 609
 610error:
 611	free_percpu(pool->tfm);
 612	if (pool->zpool)
 613		zpool_destroy_pool(pool->zpool);
 
 614	kfree(pool);
 615	return NULL;
 616}
 617
 618static __init struct zswap_pool *__zswap_pool_create_fallback(void)
 619{
 620	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
 621		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
 622			pr_err("default compressor %s not available\n",
 623			       zswap_compressor);
 624			return NULL;
 625		}
 626		pr_err("compressor %s not available, using default %s\n",
 627		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
 628		param_free_charp(&zswap_compressor);
 629		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
 630	}
 631	if (!zpool_has_pool(zswap_zpool_type)) {
 632		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
 633			pr_err("default zpool %s not available\n",
 634			       zswap_zpool_type);
 635			return NULL;
 636		}
 637		pr_err("zpool %s not available, using default %s\n",
 638		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
 639		param_free_charp(&zswap_zpool_type);
 640		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
 
 641	}
 642
 643	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 644}
 645
 646static void zswap_pool_destroy(struct zswap_pool *pool)
 647{
 
 
 648	zswap_pool_debug("destroying", pool);
 649
 650	zswap_cpu_comp_destroy(pool);
 651	free_percpu(pool->tfm);
 652	zpool_destroy_pool(pool->zpool);
 653	kfree(pool);
 654}
 655
 656static int __must_check zswap_pool_get(struct zswap_pool *pool)
 657{
 
 
 
 658	return kref_get_unless_zero(&pool->kref);
 659}
 660
 661static void __zswap_pool_release(struct rcu_head *head)
 662{
 663	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);
 
 
 
 664
 665	/* nobody should have been able to get a kref... */
 666	WARN_ON(kref_get_unless_zero(&pool->kref));
 667
 668	/* pool is now off zswap_pools list and has no references. */
 669	zswap_pool_destroy(pool);
 670}
 671
 672static void __zswap_pool_empty(struct kref *kref)
 673{
 674	struct zswap_pool *pool;
 675
 676	pool = container_of(kref, typeof(*pool), kref);
 677
 678	spin_lock(&zswap_pools_lock);
 679
 680	WARN_ON(pool == zswap_pool_current());
 681
 682	list_del_rcu(&pool->list);
 683	call_rcu(&pool->rcu_head, __zswap_pool_release);
 
 
 684
 685	spin_unlock(&zswap_pools_lock);
 686}
 687
 688static void zswap_pool_put(struct zswap_pool *pool)
 689{
 690	kref_put(&pool->kref, __zswap_pool_empty);
 691}
 692
 693/*********************************
 694* param callbacks
 695**********************************/
 696
 697/* val must be a null-terminated string */
 698static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 699			     char *type, char *compressor)
 700{
 701	struct zswap_pool *pool, *put_pool = NULL;
 702	char *s = strstrip((char *)val);
 703	int ret;
 
 704
 705	/* no change required */
 706	if (!strcmp(s, *(char **)kp->arg))
 707		return 0;
 708
 709	/* if this is load-time (pre-init) param setting,
 710	 * don't create a pool; that's done during init.
 711	 */
 712	if (!zswap_init_started)
 713		return param_set_charp(s, kp);
 714
 715	if (!type) {
 716		if (!zpool_has_pool(s)) {
 717			pr_err("zpool %s not available\n", s);
 718			return -ENOENT;
 719		}
 720		type = s;
 721	} else if (!compressor) {
 722		if (!crypto_has_comp(s, 0, 0)) {
 723			pr_err("compressor %s not available\n", s);
 724			return -ENOENT;
 725		}
 726		compressor = s;
 727	} else {
 728		WARN_ON(1);
 729		return -EINVAL;
 730	}
 731
 732	spin_lock(&zswap_pools_lock);
 733
 734	pool = zswap_pool_find_get(type, compressor);
 735	if (pool) {
 736		zswap_pool_debug("using existing", pool);
 
 737		list_del_rcu(&pool->list);
 738	} else {
 739		spin_unlock(&zswap_pools_lock);
 
 
 
 740		pool = zswap_pool_create(type, compressor);
 741		spin_lock(&zswap_pools_lock);
 742	}
 743
 744	if (pool)
 745		ret = param_set_charp(s, kp);
 746	else
 747		ret = -EINVAL;
 748
 
 
 749	if (!ret) {
 750		put_pool = zswap_pool_current();
 751		list_add_rcu(&pool->list, &zswap_pools);
 
 752	} else if (pool) {
 753		/* add the possibly pre-existing pool to the end of the pools
 754		 * list; if it's new (and empty) then it'll be removed and
 755		 * destroyed by the put after we drop the lock
 756		 */
 757		list_add_tail_rcu(&pool->list, &zswap_pools);
 758		put_pool = pool;
 759	}
 760
 761	spin_unlock(&zswap_pools_lock);
 762
 763	/* drop the ref from either the old current pool,
 764	 * or the new pool we failed to add
 765	 */
 766	if (put_pool)
 767		zswap_pool_put(put_pool);
 768
 769	return ret;
 770}
 771
 772static int zswap_compressor_param_set(const char *val,
 773				      const struct kernel_param *kp)
 774{
 775	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
 776}
 777
 778static int zswap_zpool_param_set(const char *val,
 779				 const struct kernel_param *kp)
 780{
 781	return __zswap_param_set(val, kp, NULL, zswap_compressor);
 782}
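Both callbacks funnel into __zswap_param_set(), so writing a new value to the corresponding module parameter either reuses a matching pool from zswap_pools or creates a fresh one and rotates it to the front. A user-space sketch of triggering that path for the compressor (illustrative only, not kernel code; "lz4" is just an example name that the running kernel's crypto API must actually provide, and root is required):

/* Illustrative: switch the active compressor at runtime. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/zswap/parameters/compressor", "w");
	int ok;

	if (!f) {
		perror("open compressor parameter");
		return 1;
	}
	ok = fputs("lz4", f) >= 0;
	ok = !fclose(f) && ok;
	if (!ok)
		fprintf(stderr, "compressor switch failed\n");
	return !ok;
}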
 783
 784/*********************************
 785* writeback code
 786**********************************/
 787/* return enum for zswap_get_swap_cache_page */
 788enum zswap_get_swap_ret {
 789	ZSWAP_SWAPCACHE_NEW,
 790	ZSWAP_SWAPCACHE_EXIST,
 791	ZSWAP_SWAPCACHE_FAIL,
 792};
 793
 794/*
 795 * zswap_get_swap_cache_page
 796 *
 797 * This is an adaption of read_swap_cache_async()
 798 *
 799 * This function tries to find a page with the given swap entry
 800 * in the swapper_space address space (the swap cache).  If the page
 801 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 802 * added to the swap cache, and returned in retpage.
 803 *
 804 * On success, the swap cache page is returned in retpage
 805 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 806 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 807 *     the new page is added to swapcache and locked
 808 * Returns ZSWAP_SWAPCACHE_FAIL on error
 809 */
 810static int zswap_get_swap_cache_page(swp_entry_t entry,
 811				struct page **retpage)
 812{
 813	bool page_was_allocated;
 814
 815	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
 816			NULL, 0, &page_was_allocated);
 817	if (page_was_allocated)
 818		return ZSWAP_SWAPCACHE_NEW;
 819	if (!*retpage)
 820		return ZSWAP_SWAPCACHE_FAIL;
 821	return ZSWAP_SWAPCACHE_EXIST;
 822}
 823
 
 
 
 824/*
 825 * Attempts to free an entry by adding a page to the swap cache,
 826 * decompressing the entry data into the page, and issuing a
 827 * bio write to write the page back to the swap device.
 828 *
 829 * This can be thought of as a "resumed writeback" of the page
 830 * to the swap device.  We are basically resuming the same swap
 831 * writeback path that was intercepted with the frontswap_store()
 832 * in the first place.  After the page has been decompressed into
 833 * the swap cache, the compressed version stored by zswap can be
 834 * freed.
 835 */
 836static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 
 837{
 838	struct zswap_header *zhdr;
 839	swp_entry_t swpentry;
 840	struct zswap_tree *tree;
 841	pgoff_t offset;
 842	struct zswap_entry *entry;
 843	struct page *page;
 844	struct crypto_comp *tfm;
 845	u8 *src, *dst;
 846	unsigned int dlen;
 847	int ret;
 848	struct writeback_control wbc = {
 849		.sync_mode = WB_SYNC_NONE,
 850	};
 851
 852	/* extract swpentry from data */
 853	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 854	swpentry = zhdr->swpentry; /* here */
 855	zpool_unmap_handle(pool, handle);
 856	tree = zswap_trees[swp_type(swpentry)];
 857	offset = swp_offset(swpentry);
 858
 859	/* find and ref zswap entry */
 860	spin_lock(&tree->lock);
 861	entry = zswap_entry_find_get(&tree->rbroot, offset);
 862	if (!entry) {
 863		/* entry was invalidated */
 864		spin_unlock(&tree->lock);
 865		return 0;
 
 
 
 866	}
 867	spin_unlock(&tree->lock);
 868	BUG_ON(offset != entry->offset);
 869
 870	/* try to allocate swap cache page */
 871	switch (zswap_get_swap_cache_page(swpentry, &page)) {
 872	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
 873		ret = -ENOMEM;
 874		goto fail;
 875
 876	case ZSWAP_SWAPCACHE_EXIST:
 877		/* page is already in the swap cache, ignore for now */
 878		put_page(page);
 879		ret = -EEXIST;
 880		goto fail;
 881
 882	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 883		/* decompress */
 884		dlen = PAGE_SIZE;
 885		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
 886				ZPOOL_MM_RO) + sizeof(struct zswap_header);
 887		dst = kmap_atomic(page);
 888		tfm = *get_cpu_ptr(entry->pool->tfm);
 889		ret = crypto_comp_decompress(tfm, src, entry->length,
 890					     dst, &dlen);
 891		put_cpu_ptr(entry->pool->tfm);
 892		kunmap_atomic(dst);
 893		zpool_unmap_handle(entry->pool->zpool, entry->handle);
 894		BUG_ON(ret);
 895		BUG_ON(dlen != PAGE_SIZE);
 896
 897		/* page is up to date */
 898		SetPageUptodate(page);
 899	}
 900
 901	/* move it to the tail of the inactive list after end_writeback */
 902	SetPageReclaim(page);
 903
 904	/* start writeback */
 905	__swap_writepage(page, &wbc, end_swap_bio_write);
 906	put_page(page);
 907	zswap_written_back_pages++;
 908
 909	spin_lock(&tree->lock);
 910	/* drop local reference */
 911	zswap_entry_put(tree, entry);
 912
 913	/*
 914	* There are two possible situations for entry here:
 915	* (1) refcount is 1 (normal case), entry is valid and on the tree
 916	* (2) refcount is 0, entry is freed and not on the tree
 917	*     because invalidate happened during writeback
 918	*  search the tree and free the entry if it is still there
 919	*/
 920	if (entry == zswap_rb_search(&tree->rbroot, offset))
 921		zswap_entry_put(tree, entry);
 922	spin_unlock(&tree->lock);
 923
 924	goto end;
 
 
 
 925
 926	/*
 927	* if we get here due to ZSWAP_SWAPCACHE_EXIST
 928	* a load may be happening concurrently
 929	* it is safe and okay to not free the entry
 930	* if we free the entry in the following put,
 931	* it is still okay to return !0
 932	*/
 933fail:
 934	spin_lock(&tree->lock);
 935	zswap_entry_put(tree, entry);
 936	spin_unlock(&tree->lock);
 937
 938end:
 939	return ret;
 940}
 941
 942static int zswap_shrink(void)
 943{
 944	struct zswap_pool *pool;
 945	int ret;
 946
 947	pool = zswap_pool_last_get();
 948	if (!pool)
 949		return -ENOENT;
 950
 951	ret = zpool_shrink(pool->zpool, 1, NULL);
 952
 953	zswap_pool_put(pool);
 954
 955	return ret;
 956}
 957
 958/*********************************
 959* frontswap hooks
 960**********************************/
 961/* attempts to compress and store a single page */
 962static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 963				struct page *page)
 964{
 965	struct zswap_tree *tree = zswap_trees[type];
 966	struct zswap_entry *entry, *dupentry;
 967	struct crypto_comp *tfm;
 968	int ret;
 969	unsigned int dlen = PAGE_SIZE, len;
 970	unsigned long handle;
 971	char *buf;
 972	u8 *src, *dst;
 973	struct zswap_header *zhdr;
 974
 975	if (!zswap_enabled || !tree) {
 976		ret = -ENODEV;
 977		goto reject;
 978	}
 979
 980	/* reclaim space if needed */
 981	if (zswap_is_full()) {
 982		zswap_pool_limit_hit++;
 983		if (zswap_shrink()) {
 984			zswap_reject_reclaim_fail++;
 985			ret = -ENOMEM;
 986			goto reject;
 987		}
 988	}
 989
 990	/* allocate entry */
 991	entry = zswap_entry_cache_alloc(GFP_KERNEL);
 992	if (!entry) {
 993		zswap_reject_kmemcache_fail++;
 994		ret = -ENOMEM;
 995		goto reject;
 996	}
 997
 998	/* if entry is successfully added, it keeps the reference */
 999	entry->pool = zswap_pool_current_get();
1000	if (!entry->pool) {
1001		ret = -EINVAL;
1002		goto freepage;
1003	}
1004
1005	/* compress */
1006	dst = get_cpu_var(zswap_dstmem);
1007	tfm = *get_cpu_ptr(entry->pool->tfm);
1008	src = kmap_atomic(page);
1009	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
1010	kunmap_atomic(src);
1011	put_cpu_ptr(entry->pool->tfm);
1012	if (ret) {
1013		ret = -EINVAL;
1014		goto put_dstmem;
1015	}
1016
1017	/* store */
1018	len = dlen + sizeof(struct zswap_header);
1019	ret = zpool_malloc(entry->pool->zpool, len,
1020			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
1021			   &handle);
 
1022	if (ret == -ENOSPC) {
1023		zswap_reject_compress_poor++;
1024		goto put_dstmem;
1025	}
1026	if (ret) {
1027		zswap_reject_alloc_fail++;
1028		goto put_dstmem;
1029	}
1030	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
1031	zhdr->swpentry = swp_entry(type, offset);
1032	buf = (u8 *)(zhdr + 1);
1033	memcpy(buf, dst, dlen);
1034	zpool_unmap_handle(entry->pool->zpool, handle);
1035	put_cpu_var(zswap_dstmem);
1036
1037	/* populate entry */
1038	entry->offset = offset;
1039	entry->handle = handle;
1040	entry->length = dlen;
1041
1042	/* map */
1043	spin_lock(&tree->lock);
1044	do {
1045		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
1046		if (ret == -EEXIST) {
1047			zswap_duplicate_entry++;
1048			/* remove from rbtree */
1049			zswap_rb_erase(&tree->rbroot, dupentry);
1050			zswap_entry_put(tree, dupentry);
1051		}
1052	} while (ret == -EEXIST);
1053	spin_unlock(&tree->lock);
1054
1055	/* update stats */
1056	atomic_inc(&zswap_stored_pages);
1057	zswap_update_total_size();
 
1058
1059	return 0;
1060
1061put_dstmem:
1062	put_cpu_var(zswap_dstmem);
 
1063	zswap_pool_put(entry->pool);
1064freepage:
1065	zswap_entry_cache_free(entry);
1066reject:
1067	return ret;
1068}
1069
1070/*
1071 * returns 0 if the page was successfully decompressed
1072 * returns -1 if the entry was not found or on error
1073*/
1074static int zswap_frontswap_load(unsigned type, pgoff_t offset,
1075				struct page *page)
1076{
1077	struct zswap_tree *tree = zswap_trees[type];
1078	struct zswap_entry *entry;
1079	struct crypto_comp *tfm;
1080	u8 *src, *dst;
1081	unsigned int dlen;
1082	int ret;
1083
1084	/* find */
1085	spin_lock(&tree->lock);
1086	entry = zswap_entry_find_get(&tree->rbroot, offset);
1087	if (!entry) {
1088		/* entry was written back */
1089		spin_unlock(&tree->lock);
1090		return -1;
1091	}
1092	spin_unlock(&tree->lock);
1093
1094	/* decompress */
1095	dlen = PAGE_SIZE;
1096	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
1097			ZPOOL_MM_RO) + sizeof(struct zswap_header);
1098	dst = kmap_atomic(page);
1099	tfm = *get_cpu_ptr(entry->pool->tfm);
1100	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
1101	put_cpu_ptr(entry->pool->tfm);
1102	kunmap_atomic(dst);
1103	zpool_unmap_handle(entry->pool->zpool, entry->handle);
1104	BUG_ON(ret);
1105
1106	spin_lock(&tree->lock);
1107	zswap_entry_put(tree, entry);
1108	spin_unlock(&tree->lock);
1109
1110	return 0;
1111}
1112
1113/* frees an entry in zswap */
1114static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
1115{
1116	struct zswap_tree *tree = zswap_trees[type];
1117	struct zswap_entry *entry;
1118
1119	/* find */
1120	spin_lock(&tree->lock);
1121	entry = zswap_rb_search(&tree->rbroot, offset);
1122	if (!entry) {
1123		/* entry was written back */
1124		spin_unlock(&tree->lock);
1125		return;
1126	}
 
 
 
1127
1128	/* remove from rbtree */
1129	zswap_rb_erase(&tree->rbroot, entry);
 
1130
1131	/* drop the initial reference from entry creation */
1132	zswap_entry_put(tree, entry);
 
 
 
1133
1134	spin_unlock(&tree->lock);
 
 
1135}
1136
1137/* frees all zswap entries for the given swap type */
1138static void zswap_frontswap_invalidate_area(unsigned type)
1139{
1140	struct zswap_tree *tree = zswap_trees[type];
1141	struct zswap_entry *entry, *n;
1142
1143	if (!tree)
1144		return;
1145
1146	/* walk the tree and free everything */
1147	spin_lock(&tree->lock);
1148	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1149		zswap_free_entry(entry);
1150	tree->rbroot = RB_ROOT;
1151	spin_unlock(&tree->lock);
1152	kfree(tree);
1153	zswap_trees[type] = NULL;
1154}
1155
1156static void zswap_frontswap_init(unsigned type)
1157{
1158	struct zswap_tree *tree;
1159
1160	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
1161	if (!tree) {
1162		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1163		return;
1164	}
1165
1166	tree->rbroot = RB_ROOT;
1167	spin_lock_init(&tree->lock);
1168	zswap_trees[type] = tree;
1169}
1170
1171static struct frontswap_ops zswap_frontswap_ops = {
1172	.store = zswap_frontswap_store,
1173	.load = zswap_frontswap_load,
1174	.invalidate_page = zswap_frontswap_invalidate_page,
1175	.invalidate_area = zswap_frontswap_invalidate_area,
1176	.init = zswap_frontswap_init
1177};
1178
1179/*********************************
1180* debugfs functions
1181**********************************/
1182#ifdef CONFIG_DEBUG_FS
1183#include <linux/debugfs.h>
1184
1185static struct dentry *zswap_debugfs_root;
1186
1187static int __init zswap_debugfs_init(void)
1188{
1189	if (!debugfs_initialized())
1190		return -ENODEV;
1191
1192	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1193	if (!zswap_debugfs_root)
1194		return -ENOMEM;
1195
1196	debugfs_create_u64("pool_limit_hit", S_IRUGO,
1197			zswap_debugfs_root, &zswap_pool_limit_hit);
1198	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
1199			zswap_debugfs_root, &zswap_reject_reclaim_fail);
1200	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
1201			zswap_debugfs_root, &zswap_reject_alloc_fail);
1202	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
1203			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1204	debugfs_create_u64("reject_compress_poor", S_IRUGO,
1205			zswap_debugfs_root, &zswap_reject_compress_poor);
1206	debugfs_create_u64("written_back_pages", S_IRUGO,
1207			zswap_debugfs_root, &zswap_written_back_pages);
1208	debugfs_create_u64("duplicate_entry", S_IRUGO,
1209			zswap_debugfs_root, &zswap_duplicate_entry);
1210	debugfs_create_u64("pool_total_size", S_IRUGO,
1211			zswap_debugfs_root, &zswap_pool_total_size);
1212	debugfs_create_atomic_t("stored_pages", S_IRUGO,
1213			zswap_debugfs_root, &zswap_stored_pages);
1214
1215	return 0;
1216}
1217
1218static void __exit zswap_debugfs_exit(void)
1219{
1220	debugfs_remove_recursive(zswap_debugfs_root);
1221}
1222#else
1223static int __init zswap_debugfs_init(void)
1224{
1225	return 0;
1226}
1227
1228static void __exit zswap_debugfs_exit(void) { }
1229#endif
1230
1231/*********************************
1232* module init and exit
1233**********************************/
1234static int __init init_zswap(void)
1235{
1236	struct zswap_pool *pool;
 
1237
1238	zswap_init_started = true;
1239
1240	if (zswap_entry_cache_create()) {
1241		pr_err("entry cache creation failed\n");
1242		goto cache_fail;
1243	}
1244
1245	if (zswap_cpu_dstmem_init()) {
1246		pr_err("dstmem alloc failed\n");
1247		goto dstmem_fail;
1248	}
 
 
1249
1250	pool = __zswap_pool_create_fallback();
1251	if (!pool) {
1252		pr_err("pool creation failed\n");
1253		goto pool_fail;
1254	}
1255	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1256		zpool_get_type(pool->zpool));
1257
1258	list_add(&pool->list, &zswap_pools);
 
 
1259
1260	frontswap_register_ops(&zswap_frontswap_ops);
1261	if (zswap_debugfs_init())
1262		pr_warn("debugfs initialization failed\n");
 
1263	return 0;
1264
1265pool_fail:
1266	zswap_cpu_dstmem_destroy();
1267dstmem_fail:
1268	zswap_entry_cache_destroy();
 
1269cache_fail:
 
 
 
1270	return -ENOMEM;
1271}
1272/* must be late so crypto has time to come up */
1273late_initcall(init_zswap);
1274
1275MODULE_LICENSE("GPL");
1276MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1277MODULE_DESCRIPTION("Compressed cache for swap pages");