   1/*
   2 * zcache.c
   3 *
   4 * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
   5 * Copyright (c) 2010,2011, Nitin Gupta
   6 *
   7 * Zcache provides an in-kernel "host implementation" for transcendent memory
   8 * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
   9 * page-accessible memory [1] interfaces, both utilizing the crypto compression
  10 * API:
  11 * 1) "compression buddies" ("zbud") is used for ephemeral pages
  12 * 2) zsmalloc is used for persistent pages.
 * Zsmalloc has very low external fragmentation and so maximizes space
 * efficiency, while zbud allows pairs (and potentially,
  15 * in the future, more than a pair of) compressed pages to be closely linked
  16 * so that reclaiming can be done via the kernel's physical-page-oriented
  17 * "shrinker" interface.
  18 *
  19 * [1] For a definition of page-accessible memory (aka PAM), see:
  20 *   http://marc.info/?l=linux-mm&m=127811271605009
  21 */
  22
  23#include <linux/module.h>
  24#include <linux/cpu.h>
  25#include <linux/highmem.h>
  26#include <linux/list.h>
  27#include <linux/slab.h>
  28#include <linux/spinlock.h>
  29#include <linux/types.h>
  30#include <linux/atomic.h>
  31#include <linux/math64.h>
  32#include <linux/crypto.h>
  33#include <linux/string.h>
  34#include "tmem.h"
  35
  36#include "../zsmalloc/zsmalloc.h"
  37
  38#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
  39#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
  40#endif
  41#ifdef CONFIG_CLEANCACHE
  42#include <linux/cleancache.h>
  43#endif
  44#ifdef CONFIG_FRONTSWAP
  45#include <linux/frontswap.h>
  46#endif
  47
  48#if 0
  49/* this is more aggressive but may cause other problems? */
  50#define ZCACHE_GFP_MASK	(GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
  51#else
  52#define ZCACHE_GFP_MASK \
  53	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
  54#endif
  55
  56#define MAX_POOLS_PER_CLIENT 16
  57
  58#define MAX_CLIENTS 16
  59#define LOCAL_CLIENT ((uint16_t)-1)
  60
  61MODULE_LICENSE("GPL");
  62
  63struct zcache_client {
  64	struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
  65	struct zs_pool *zspool;
  66	bool allocated;
  67	atomic_t refcount;
  68};
  69
  70static struct zcache_client zcache_host;
  71static struct zcache_client zcache_clients[MAX_CLIENTS];
  72
  73static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
  74{
  75	BUG_ON(cli == NULL);
  76	if (cli == &zcache_host)
  77		return LOCAL_CLIENT;
  78	return cli - &zcache_clients[0];
  79}
  80
/* True iff @cli is the kernel's own (host) client rather than a guest. */
static inline bool is_local_client(struct zcache_client *cli)
{
	return cli == &zcache_host;
}
  85
  86/* crypto API for zcache  */
  87#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
  88static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
  89static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
  90
  91enum comp_op {
  92	ZCACHE_COMPOP_COMPRESS,
  93	ZCACHE_COMPOP_DECOMPRESS
  94};
  95
  96static inline int zcache_comp_op(enum comp_op op,
  97				const u8 *src, unsigned int slen,
  98				u8 *dst, unsigned int *dlen)
  99{
 100	struct crypto_comp *tfm;
 101	int ret;
 102
 103	BUG_ON(!zcache_comp_pcpu_tfms);
 104	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
 105	BUG_ON(!tfm);
 106	switch (op) {
 107	case ZCACHE_COMPOP_COMPRESS:
 108		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
 109		break;
 110	case ZCACHE_COMPOP_DECOMPRESS:
 111		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
 112		break;
 113	}
 114	put_cpu();
 115	return ret;
 116}
 117
 118/**********
 119 * Compression buddies ("zbud") provides for packing two (or, possibly
 120 * in the future, more) compressed ephemeral pages into a single "raw"
 121 * (physical) page and tracking them with data structures so that
 122 * the raw pages can be easily reclaimed.
 123 *
 124 * A zbud page ("zbpg") is an aligned page containing a list_head,
 125 * a lock, and two "zbud headers".  The remainder of the physical
 126 * page is divided up into aligned 64-byte "chunks" which contain
 127 * the compressed data for zero, one, or two zbuds.  Each zbpg
 128 * resides on: (1) an "unused list" if it has no zbuds; (2) a
 129 * "buddied" list if it is fully populated  with two zbuds; or
 130 * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
 131 * the one unbuddied zbud uses.  The data inside a zbpg cannot be
 132 * read or written unless the zbpg's lock is held.
 133 */
 134
 135#define ZBH_SENTINEL  0x43214321
 136#define ZBPG_SENTINEL  0xdeadbeef
 137
 138#define ZBUD_MAX_BUDS 2
 139
/*
 * Per-zbud metadata: the tmem identity (client, pool, oid, index) of the
 * compressed page stored in this buddy slot.  Embedded inside zbud_page.
 */
struct zbud_hdr {
	uint16_t client_id;
	uint16_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	uint16_t size; /* compressed size in bytes, zero means unused */
	DECL_SENTINEL
};
 148
/*
 * One raw pageframe managed by zbud: list linkage (unused, unbuddied,
 * or buddied list), a lock protecting the page's data, and the two
 * buddy headers.  The compressed data chunks occupy the remainder of
 * the physical page.
 */
struct zbud_page {
	struct list_head bud_list;
	spinlock_t lock;
	struct zbud_hdr buddy[ZBUD_MAX_BUDS];
	DECL_SENTINEL
	/* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
};
 156
 157#define CHUNK_SHIFT	6
 158#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
 159#define CHUNK_MASK	(~(CHUNK_SIZE-1))
 160#define NCHUNKS		(((PAGE_SIZE - sizeof(struct zbud_page)) & \
 161				CHUNK_MASK) >> CHUNK_SHIFT)
 162#define MAX_CHUNK	(NCHUNKS-1)
 163
 164static struct {
 165	struct list_head list;
 166	unsigned count;
 167} zbud_unbuddied[NCHUNKS];
 168/* list N contains pages with N chunks USED and NCHUNKS-N unused */
 169/* element 0 is never used but optimizing that isn't worth it */
 170static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
 171
 172struct list_head zbud_buddied_list;
 173static unsigned long zcache_zbud_buddied_count;
 174
 175/* protects the buddied list and all unbuddied lists */
 176static DEFINE_SPINLOCK(zbud_budlists_spinlock);
 177
 178static LIST_HEAD(zbpg_unused_list);
 179static unsigned long zcache_zbpg_unused_list_count;
 180
 181/* protects the unused page list */
 182static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);
 183
 184static atomic_t zcache_zbud_curr_raw_pages;
 185static atomic_t zcache_zbud_curr_zpages;
 186static unsigned long zcache_zbud_curr_zbytes;
 187static unsigned long zcache_zbud_cumul_zpages;
 188static unsigned long zcache_zbud_cumul_zbytes;
 189static unsigned long zcache_compress_poor;
 190static unsigned long zcache_mean_compress_poor;
 191
 192/* forward references */
 193static void *zcache_get_free_page(void);
 194static void zcache_free_page(void *p);
 195
 196/*
 197 * zbud helper functions
 198 */
 199
 200static inline unsigned zbud_max_buddy_size(void)
 201{
 202	return MAX_CHUNK << CHUNK_SHIFT;
 203}
 204
 205static inline unsigned zbud_size_to_chunks(unsigned size)
 206{
 207	BUG_ON(size == 0 || size > zbud_max_buddy_size());
 208	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
 209}
 210
/*
 * Recover which buddy slot (0 or 1) a zbud_hdr occupies by matching its
 * offset within the (page-aligned) zbud_page against the offsets of the
 * buddy[] array entries.
 */
static inline int zbud_budnum(struct zbud_hdr *zh)
{
	/* zbud pages are page-aligned, so the low bits give the offset */
	unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
	struct zbud_page *zbpg = NULL;	/* only used for offsetof typing */
	unsigned budnum = -1U;
	int i;

	for (i = 0; i < ZBUD_MAX_BUDS; i++)
		if (offset == offsetof(typeof(*zbpg), buddy[i])) {
			budnum = i;
			break;
		}
	BUG_ON(budnum == -1U);	/* zh did not point at a buddy[] entry */
	return budnum;
}
 226
/*
 * Return the address of the compressed data for zbud @zh.  Buddy 0's
 * data starts just past the zbud_page header (rounded up to a chunk
 * boundary); buddy 1's data sits flush against the end of the page,
 * so the two never overlap.  Caller must hold the page's lock.
 */
static char *zbud_data(struct zbud_hdr *zh, unsigned size)
{
	struct zbud_page *zbpg;
	char *p;
	unsigned budnum;

	ASSERT_SENTINEL(zh, ZBH);
	budnum = zbud_budnum(zh);
	BUG_ON(size == 0 || size > zbud_max_buddy_size());
	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	ASSERT_SPINLOCK(&zbpg->lock);
	p = (char *)zbpg;
	if (budnum == 0)
		/* skip the header, rounded up to the next chunk boundary */
		p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
							CHUNK_MASK);
	else if (budnum == 1)
		/* data ends exactly at the end of the page */
		p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
	return p;
}
 246
 247/*
 248 * zbud raw page management
 249 */
 250
 251static struct zbud_page *zbud_alloc_raw_page(void)
 252{
 253	struct zbud_page *zbpg = NULL;
 254	struct zbud_hdr *zh0, *zh1;
 255	bool recycled = 0;
 256
 257	/* if any pages on the zbpg list, use one */
 258	spin_lock(&zbpg_unused_list_spinlock);
 259	if (!list_empty(&zbpg_unused_list)) {
 260		zbpg = list_first_entry(&zbpg_unused_list,
 261				struct zbud_page, bud_list);
 262		list_del_init(&zbpg->bud_list);
 263		zcache_zbpg_unused_list_count--;
 264		recycled = 1;
 265	}
 266	spin_unlock(&zbpg_unused_list_spinlock);
 267	if (zbpg == NULL)
 268		/* none on zbpg list, try to get a kernel page */
 269		zbpg = zcache_get_free_page();
 270	if (likely(zbpg != NULL)) {
 271		INIT_LIST_HEAD(&zbpg->bud_list);
 272		zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
 273		spin_lock_init(&zbpg->lock);
 274		if (recycled) {
 275			ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
 276			SET_SENTINEL(zbpg, ZBPG);
 277			BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
 278			BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
 279		} else {
 280			atomic_inc(&zcache_zbud_curr_raw_pages);
 281			INIT_LIST_HEAD(&zbpg->bud_list);
 282			SET_SENTINEL(zbpg, ZBPG);
 283			zh0->size = 0; zh1->size = 0;
 284			tmem_oid_set_invalid(&zh0->oid);
 285			tmem_oid_set_invalid(&zh1->oid);
 286		}
 287	}
 288	return zbpg;
 289}
 290
/*
 * Return an (already empty) zbud page to the unused-page list.  Caller
 * must hold zbpg->lock; it is RELEASED here before the page is queued,
 * since a page on the unused list has no live data left to protect.
 */
static void zbud_free_raw_page(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];

	ASSERT_SENTINEL(zbpg, ZBPG);
	BUG_ON(!list_empty(&zbpg->bud_list));
	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
	BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
	/* invert the sentinel so recycling can verify the page was freed */
	INVERT_SENTINEL(zbpg, ZBPG);
	spin_unlock(&zbpg->lock);
	spin_lock(&zbpg_unused_list_spinlock);
	list_add(&zbpg->bud_list, &zbpg_unused_list);
	zcache_zbpg_unused_list_count++;
	spin_unlock(&zbpg_unused_list_spinlock);
}
 307
 308/*
 309 * core zbud handling routines
 310 */
 311
/*
 * Mark one zbud slot unused and update byte/page statistics.  Returns
 * the former compressed size so the caller can locate the correct
 * unbuddied list.  Caller must hold the containing page's lock.
 */
static unsigned zbud_free(struct zbud_hdr *zh)
{
	unsigned size;

	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(!tmem_oid_valid(&zh->oid));
	size = zh->size;
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	zh->size = 0;
	tmem_oid_set_invalid(&zh->oid);
	INVERT_SENTINEL(zh, ZBH);
	/* curr_zbytes is protected by the budlists lock held by callers */
	zcache_zbud_curr_zbytes -= size;
	atomic_dec(&zcache_zbud_curr_zpages);
	return size;
}
 327
/*
 * Free zbud @zh and re-list its page: to the raw free list if the other
 * buddy is also empty, otherwise from the buddied list back onto the
 * unbuddied list matching the surviving buddy's size.  A page found on
 * no list is a "zombie" being evicted concurrently and is left alone.
 */
static void zbud_free_and_delist(struct zbud_hdr *zh)
{
	unsigned chunks;
	struct zbud_hdr *zh_other;
	unsigned budnum = zbud_budnum(zh), size;
	struct zbud_page *zbpg =
		container_of(zh, struct zbud_page, buddy[budnum]);

	/* lock order: budlists lock first, then the page lock */
	spin_lock(&zbud_budlists_spinlock);
	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		spin_unlock(&zbpg->lock);
		spin_unlock(&zbud_budlists_spinlock);
		return;
	}
	size = zbud_free(zh);
	ASSERT_SPINLOCK(&zbpg->lock);
	zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
	if (zh_other->size == 0) { /* was unbuddied: unlist and free */
		chunks = zbud_size_to_chunks(size) ;
		BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
		list_del_init(&zbpg->bud_list);
		zbud_unbuddied[chunks].count--;
		spin_unlock(&zbud_budlists_spinlock);
		/* zbud_free_raw_page() releases zbpg->lock for us */
		zbud_free_raw_page(zbpg);
	} else { /* was buddied: move remaining buddy to unbuddied list */
		chunks = zbud_size_to_chunks(zh_other->size) ;
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
		zbud_unbuddied[chunks].count++;
		spin_unlock(&zbud_budlists_spinlock);
		spin_unlock(&zbpg->lock);
	}
}
 364
/*
 * Store @size bytes of ALREADY-compressed data (@cdata) as a new zbud.
 * First try to pair with an existing unbuddied page that has enough
 * free chunks (scanning from tightest fit to loosest); failing that,
 * allocate a fresh raw page.  Returns the new zbud header, or NULL if
 * no page could be obtained.
 */
static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
					struct tmem_oid *oid,
					uint32_t index, struct page *page,
					void *cdata, unsigned size)
{
	struct zbud_hdr *zh0, *zh1, *zh = NULL;
	struct zbud_page *zbpg = NULL, *ztmp;
	unsigned nchunks;
	char *to;
	int i, found_good_buddy = 0;

	nchunks = zbud_size_to_chunks(size) ;
	/* scan unbuddied lists whose pages leave >= nchunks chunks free */
	for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
		spin_lock(&zbud_budlists_spinlock);
		if (!list_empty(&zbud_unbuddied[i].list)) {
			list_for_each_entry_safe(zbpg, ztmp,
				    &zbud_unbuddied[i].list, bud_list) {
				/* trylock: skip busy pages, avoid deadlock */
				if (spin_trylock(&zbpg->lock)) {
					found_good_buddy = i;
					goto found_unbuddied;
				}
			}
		}
		spin_unlock(&zbud_budlists_spinlock);
	}
	/* didn't find a good buddy, try allocating a new page */
	zbpg = zbud_alloc_raw_page();
	if (unlikely(zbpg == NULL))
		goto out;
	/* fresh page starts out unbuddied with only buddy[0] in use */
	spin_lock(&zbud_budlists_spinlock);
	spin_lock(&zbpg->lock);
	list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
	zbud_unbuddied[nchunks].count++;
	zh = &zbpg->buddy[0];
	goto init_zh;

found_unbuddied:
	/* reached with both the budlists lock and zbpg->lock held */
	ASSERT_SPINLOCK(&zbpg->lock);
	zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
	BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
	if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
		ASSERT_SENTINEL(zh0, ZBH);
		zh = zh1;
	} else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
		ASSERT_SENTINEL(zh1, ZBH);
		zh = zh0;
	} else
		BUG();
	/* page is now fully populated: move it to the buddied list */
	list_del_init(&zbpg->bud_list);
	zbud_unbuddied[found_good_buddy].count--;
	list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
	zcache_zbud_buddied_count++;

init_zh:
	SET_SENTINEL(zh, ZBH);
	zh->size = size;
	zh->index = index;
	zh->oid = *oid;
	zh->pool_id = pool_id;
	zh->client_id = client_id;
	to = zbud_data(zh, size);
	memcpy(to, cdata, size);
	spin_unlock(&zbpg->lock);
	spin_unlock(&zbud_budlists_spinlock);

	zbud_cumul_chunk_counts[nchunks]++;
	atomic_inc(&zcache_zbud_curr_zpages);
	zcache_zbud_cumul_zpages++;
	zcache_zbud_curr_zbytes += size;
	zcache_zbud_cumul_zbytes += size;
out:
	return zh;
}
 439
/*
 * Decompress zbud @zh into @page.  Returns 0 on success or -EINVAL if
 * the containing page is a zombie being evicted (see
 * zbud_evict_pages()).  A decompression error or short output is a
 * bug, since the data was compressed from exactly one full page.
 */
static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
	struct zbud_page *zbpg;
	unsigned budnum = zbud_budnum(zh);
	unsigned int out_len = PAGE_SIZE;
	char *to_va, *from_va;
	unsigned size;
	int ret = 0;

	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		ret = -EINVAL;
		goto out;
	}
	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	to_va = kmap_atomic(page);
	size = zh->size;
	from_va = zbud_data(zh, size);
	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
				to_va, &out_len);
	BUG_ON(ret);
	BUG_ON(out_len != PAGE_SIZE);
	kunmap_atomic(to_va);
out:
	spin_unlock(&zbpg->lock);
	return ret;
}
 470
 471/*
 472 * The following routines handle shrinking of ephemeral pages by evicting
 473 * pages "least valuable" first.
 474 */
 475
 476static unsigned long zcache_evicted_raw_pages;
 477static unsigned long zcache_evicted_buddied_pages;
 478static unsigned long zcache_evicted_unbuddied_pages;
 479
 480static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
 481						uint16_t poolid);
 482static void zcache_put_pool(struct tmem_pool *pool);
 483
 484/*
 485 * Flush and free all zbuds in a zbpg, then free the pageframe
 486 */
/*
 * Flush and free all zbuds in a zbpg, then free the pageframe.  The
 * tmem identities are copied out while the page lock is held, the lock
 * is dropped for the tmem flushes (which take other locks), and the
 * page is re-locked only to hand it to zbud_free_raw_page().
 */
static void zbud_evict_zbpg(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh;
	int i, j;
	uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
	uint32_t index[ZBUD_MAX_BUDS];
	struct tmem_oid oid[ZBUD_MAX_BUDS];
	struct tmem_pool *pool;

	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(!list_empty(&zbpg->bud_list));
	/* pass 1: record each live zbud's identity, then free it */
	for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
		zh = &zbpg->buddy[i];
		if (zh->size) {
			client_id[j] = zh->client_id;
			pool_id[j] = zh->pool_id;
			oid[j] = zh->oid;
			index[j] = zh->index;
			j++;
			zbud_free(zh);
		}
	}
	spin_unlock(&zbpg->lock);
	/* pass 2: with the page unlocked, flush the tmem entries */
	for (i = 0; i < j; i++) {
		pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
		if (pool != NULL) {
			tmem_flush_page(pool, &oid[i], index[i]);
			zcache_put_pool(pool);
		}
	}
	ASSERT_SENTINEL(zbpg, ZBPG);
	spin_lock(&zbpg->lock);
	/* zbud_free_raw_page() releases the lock */
	zbud_free_raw_page(zbpg);
}
 521
 522/*
 523 * Free nr pages.  This code is funky because we want to hold the locks
 524 * protecting various lists for as short a time as possible, and in some
 525 * circumstances the list may change asynchronously when the list lock is
 526 * not held.  In some cases we also trylock not only to avoid waiting on a
 527 * page in use by another cpu, but also to avoid potential deadlock due to
 528 * lock inversion.
 529 */
/* Evict up to @nr raw pages: unused pages first, then unbuddied
 * (tightest-packed first), then buddied pages as a last resort. */
static void zbud_evict_pages(int nr)
{
	struct zbud_page *zbpg;
	int i;

	/* first try freeing any pages on unused list */
retry_unused_list:
	spin_lock_bh(&zbpg_unused_list_spinlock);
	if (!list_empty(&zbpg_unused_list)) {
		/* can't walk list here, since it may change when unlocked */
		zbpg = list_first_entry(&zbpg_unused_list,
				struct zbud_page, bud_list);
		list_del_init(&zbpg->bud_list);
		zcache_zbpg_unused_list_count--;
		atomic_dec(&zcache_zbud_curr_raw_pages);
		spin_unlock_bh(&zbpg_unused_list_spinlock);
		zcache_free_page(zbpg);
		zcache_evicted_raw_pages++;
		if (--nr <= 0)
			goto out;
		goto retry_unused_list;
	}
	spin_unlock_bh(&zbpg_unused_list_spinlock);

	/* now try freeing unbuddied pages, starting with least space avail */
	for (i = 0; i < MAX_CHUNK; i++) {
retry_unbud_list_i:
		spin_lock_bh(&zbud_budlists_spinlock);
		if (list_empty(&zbud_unbuddied[i].list)) {
			spin_unlock_bh(&zbud_budlists_spinlock);
			continue;
		}
		list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
			/* trylock avoids deadlock with zbpg->lock holders */
			if (unlikely(!spin_trylock(&zbpg->lock)))
				continue;
			list_del_init(&zbpg->bud_list);
			zbud_unbuddied[i].count--;
			/*
			 * drop only the spinlock; bottom halves stay
			 * disabled until the local_bh_enable() below
			 */
			spin_unlock(&zbud_budlists_spinlock);
			zcache_evicted_unbuddied_pages++;
			/* want budlists unlocked when doing zbpg eviction */
			zbud_evict_zbpg(zbpg);
			local_bh_enable();
			if (--nr <= 0)
				goto out;
			goto retry_unbud_list_i;
		}
		spin_unlock_bh(&zbud_budlists_spinlock);
	}

	/* as a last resort, free buddied pages */
retry_bud_list:
	spin_lock_bh(&zbud_budlists_spinlock);
	if (list_empty(&zbud_buddied_list)) {
		spin_unlock_bh(&zbud_budlists_spinlock);
		goto out;
	}
	list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
		if (unlikely(!spin_trylock(&zbpg->lock)))
			continue;
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		/* as above: BH re-enabled only after the eviction */
		spin_unlock(&zbud_budlists_spinlock);
		zcache_evicted_buddied_pages++;
		/* want budlists unlocked when doing zbpg eviction */
		zbud_evict_zbpg(zbpg);
		local_bh_enable();
		if (--nr <= 0)
			goto out;
		goto retry_bud_list;
	}
	spin_unlock_bh(&zbud_budlists_spinlock);
out:
	return;
}
 604
 605static void zbud_init(void)
 606{
 607	int i;
 608
 609	INIT_LIST_HEAD(&zbud_buddied_list);
 610	zcache_zbud_buddied_count = 0;
 611	for (i = 0; i < NCHUNKS; i++) {
 612		INIT_LIST_HEAD(&zbud_unbuddied[i].list);
 613		zbud_unbuddied[i].count = 0;
 614	}
 615}
 616
 617#ifdef CONFIG_SYSFS
 618/*
 619 * These sysfs routines show a nice distribution of how many zbpg's are
 620 * currently (and have ever been placed) in each unbuddied list.  It's fun
 621 * to watch but can probably go away before final merge.
 622 */
 623static int zbud_show_unbuddied_list_counts(char *buf)
 624{
 625	int i;
 626	char *p = buf;
 627
 628	for (i = 0; i < NCHUNKS; i++)
 629		p += sprintf(p, "%u ", zbud_unbuddied[i].count);
 630	return p - buf;
 631}
 632
 633static int zbud_show_cumul_chunk_counts(char *buf)
 634{
 635	unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
 636	unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
 637	unsigned long total_chunks_lte_42 = 0;
 638	char *p = buf;
 639
 640	for (i = 0; i < NCHUNKS; i++) {
 641		p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
 642		chunks += zbud_cumul_chunk_counts[i];
 643		total_chunks += zbud_cumul_chunk_counts[i];
 644		sum_total_chunks += i * zbud_cumul_chunk_counts[i];
 645		if (i == 21)
 646			total_chunks_lte_21 = total_chunks;
 647		if (i == 32)
 648			total_chunks_lte_32 = total_chunks;
 649		if (i == 42)
 650			total_chunks_lte_42 = total_chunks;
 651	}
 652	p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
 653		total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
 654		chunks == 0 ? 0 : sum_total_chunks / chunks);
 655	return p - buf;
 656}
 657#endif
 658
 659/**********
 660 * This "zv" PAM implementation combines the slab-based zsmalloc
 661 * with the crypto compression API to maximize the amount of data that can
 662 * be packed into a physical page.
 663 *
 664 * Zv represents a PAM page with the index and object (plus a "size" value
 665 * necessary for decompression) immediately preceding the compressed data.
 666 */
 667
 668#define ZVH_SENTINEL  0x43214321
 669
/*
 * Header stored immediately before each compressed ("zv") object in the
 * zsmalloc pool; records the tmem identity plus the compressed size
 * needed for later decompression.
 */
struct zv_hdr {
	uint32_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	size_t size;
	DECL_SENTINEL
};
 677
 678/* rudimentary policy limits */
 679/* total number of persistent pages may not exceed this percentage */
 680static unsigned int zv_page_count_policy_percent = 75;
 681/*
 682 * byte count defining poor compression; pages with greater zsize will be
 683 * rejected
 684 */
 685static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
 686/*
 687 * byte count defining poor *mean* compression; pages with greater zsize
 688 * will be rejected until sufficient better-compressed pages are accepted
 689 * driving the mean below this threshold
 690 */
 691static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
 692
 693static atomic_t zv_curr_dist_counts[NCHUNKS];
 694static atomic_t zv_cumul_dist_counts[NCHUNKS];
 695
/*
 * Allocate a zsmalloc object big enough for a zv_hdr plus @clen bytes
 * of already-compressed data, fill it in, and return the zsmalloc
 * HANDLE (not the zv_hdr pointer; note the declared return type is
 * misleading), or NULL on allocation failure.  Must be called with
 * interrupts disabled.
 */
static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
				struct tmem_oid *oid, uint32_t index,
				void *cdata, unsigned clen)
{
	struct zv_hdr *zv;
	u32 size = clen + sizeof(struct zv_hdr);
	int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
	void *handle = NULL;

	BUG_ON(!irqs_disabled());
	/* policy (zv_max_zsize) keeps accepted sizes well below a page */
	BUG_ON(chunks >= NCHUNKS);
	handle = zs_malloc(pool, size);
	if (!handle)
		goto out;
	atomic_inc(&zv_curr_dist_counts[chunks]);
	atomic_inc(&zv_cumul_dist_counts[chunks]);
	/* the object must be mapped while written, then unmapped */
	zv = zs_map_object(pool, handle);
	zv->index = index;
	zv->oid = *oid;
	zv->pool_id = pool_id;
	zv->size = clen;
	SET_SENTINEL(zv, ZVH);
	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
	zs_unmap_object(pool, handle);
out:
	return handle;
}
 723
 724static void zv_free(struct zs_pool *pool, void *handle)
 725{
 726	unsigned long flags;
 727	struct zv_hdr *zv;
 728	uint16_t size;
 729	int chunks;
 730
 731	zv = zs_map_object(pool, handle);
 732	ASSERT_SENTINEL(zv, ZVH);
 733	size = zv->size + sizeof(struct zv_hdr);
 734	INVERT_SENTINEL(zv, ZVH);
 735	zs_unmap_object(pool, handle);
 736
 737	chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 738	BUG_ON(chunks >= NCHUNKS);
 739	atomic_dec(&zv_curr_dist_counts[chunks]);
 740
 741	local_irq_save(flags);
 742	zs_free(pool, handle);
 743	local_irq_restore(flags);
 744}
 745
/*
 * Decompress the zv identified by zsmalloc @handle into @page.  The
 * compressed payload immediately follows the zv_hdr; the output must
 * be exactly one full page or it's a bug.
 */
static void zv_decompress(struct page *page, void *handle)
{
	unsigned int clen = PAGE_SIZE;
	char *to_va;
	int ret;
	struct zv_hdr *zv;

	zv = zs_map_object(zcache_host.zspool, handle);
	BUG_ON(zv->size == 0);
	ASSERT_SENTINEL(zv, ZVH);
	to_va = kmap_atomic(page);
	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
				zv->size, to_va, &clen);
	kunmap_atomic(to_va);
	zs_unmap_object(zcache_host.zspool, handle);
	BUG_ON(ret);
	BUG_ON(clen != PAGE_SIZE);
}
 764
 765#ifdef CONFIG_SYSFS
 766/*
 767 * show a distribution of compression stats for zv pages.
 768 */
 769
 770static int zv_curr_dist_counts_show(char *buf)
 771{
 772	unsigned long i, n, chunks = 0, sum_total_chunks = 0;
 773	char *p = buf;
 774
 775	for (i = 0; i < NCHUNKS; i++) {
 776		n = atomic_read(&zv_curr_dist_counts[i]);
 777		p += sprintf(p, "%lu ", n);
 778		chunks += n;
 779		sum_total_chunks += i * n;
 780	}
 781	p += sprintf(p, "mean:%lu\n",
 782		chunks == 0 ? 0 : sum_total_chunks / chunks);
 783	return p - buf;
 784}
 785
 786static int zv_cumul_dist_counts_show(char *buf)
 787{
 788	unsigned long i, n, chunks = 0, sum_total_chunks = 0;
 789	char *p = buf;
 790
 791	for (i = 0; i < NCHUNKS; i++) {
 792		n = atomic_read(&zv_cumul_dist_counts[i]);
 793		p += sprintf(p, "%lu ", n);
 794		chunks += n;
 795		sum_total_chunks += i * n;
 796	}
 797	p += sprintf(p, "mean:%lu\n",
 798		chunks == 0 ? 0 : sum_total_chunks / chunks);
 799	return p - buf;
 800}
 801
 802/*
 803 * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
 804 * pages that don't compress to less than this value (including metadata
 805 * overhead) to be rejected.  We don't allow the value to get too close
 806 * to PAGE_SIZE.
 807 */
/* sysfs read of the zv_max_zsize policy limit (bytes). */
static ssize_t zv_max_zsize_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", zv_max_zsize);
}
 814
 815static ssize_t zv_max_zsize_store(struct kobject *kobj,
 816				    struct kobj_attribute *attr,
 817				    const char *buf, size_t count)
 818{
 819	unsigned long val;
 820	int err;
 821
 822	if (!capable(CAP_SYS_ADMIN))
 823		return -EPERM;
 824
 825	err = kstrtoul(buf, 10, &val);
 826	if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
 827		return -EINVAL;
 828	zv_max_zsize = val;
 829	return count;
 830}
 831
 832/*
 833 * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
 834 * pages that don't compress to less than this value (including metadata
 835 * overhead) to be rejected UNLESS the mean compression is also smaller
 836 * than this value.  In other words, we are load-balancing-by-zsize the
 837 * accepted pages.  Again, we don't allow the value to get too close
 838 * to PAGE_SIZE.
 839 */
/* sysfs read of the zv_max_mean_zsize policy limit (bytes). */
static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", zv_max_mean_zsize);
}
 846
 847static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
 848				    struct kobj_attribute *attr,
 849				    const char *buf, size_t count)
 850{
 851	unsigned long val;
 852	int err;
 853
 854	if (!capable(CAP_SYS_ADMIN))
 855		return -EPERM;
 856
 857	err = kstrtoul(buf, 10, &val);
 858	if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
 859		return -EINVAL;
 860	zv_max_mean_zsize = val;
 861	return count;
 862}
 863
 864/*
 865 * setting zv_page_count_policy_percent via sysfs sets an upper bound of
 866 * persistent (e.g. swap) pages that will be retained according to:
 867 *     (zv_page_count_policy_percent * totalram_pages) / 100)
 868 * when that limit is reached, further puts will be rejected (until
 869 * some pages have been flushed).  Note that, due to compression,
 870 * this number may exceed 100; it defaults to 75 and we set an
 871 * arbitary limit of 150.  A poor choice will almost certainly result
 872 * in OOM's, so this value should only be changed prudently.
 873 */
/* sysfs read of the persistent-page retention limit (percent of RAM). */
static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
						 struct kobj_attribute *attr,
						 char *buf)
{
	return sprintf(buf, "%u\n", zv_page_count_policy_percent);
}
 880
 881static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
 882						  struct kobj_attribute *attr,
 883						  const char *buf, size_t count)
 884{
 885	unsigned long val;
 886	int err;
 887
 888	if (!capable(CAP_SYS_ADMIN))
 889		return -EPERM;
 890
 891	err = kstrtoul(buf, 10, &val);
 892	if (err || (val == 0) || (val > 150))
 893		return -EINVAL;
 894	zv_page_count_policy_percent = val;
 895	return count;
 896}
 897
/* sysfs attribute glue for the zv policy tunables defined above */
static struct kobj_attribute zcache_zv_max_zsize_attr = {
		.attr = { .name = "zv_max_zsize", .mode = 0644 },
		.show = zv_max_zsize_show,
		.store = zv_max_zsize_store,
};

static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
		.attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
		.show = zv_max_mean_zsize_show,
		.store = zv_max_mean_zsize_store,
};

static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
		.attr = { .name = "zv_page_count_policy_percent",
			  .mode = 0644 },
		.show = zv_page_count_policy_percent_show,
		.store = zv_page_count_policy_percent_store,
};
 916#endif
 917
 918/*
 919 * zcache core code starts here
 920 */
 921
 922/* useful stats not collected by cleancache or frontswap */
 923static unsigned long zcache_flush_total;
 924static unsigned long zcache_flush_found;
 925static unsigned long zcache_flobj_total;
 926static unsigned long zcache_flobj_found;
 927static unsigned long zcache_failed_eph_puts;
 928static unsigned long zcache_failed_pers_puts;
 929
 930/*
 931 * Tmem operations assume the poolid implies the invoking client.
 932 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
 933 * RAMster has each client numbered by cluster node, and a KVM version
 934 * of zcache would have one client per guest and each client might
 935 * have a poolid==N.
 936 */
 937static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
 938{
 939	struct tmem_pool *pool = NULL;
 940	struct zcache_client *cli = NULL;
 941
 942	if (cli_id == LOCAL_CLIENT)
 943		cli = &zcache_host;
 944	else {
 945		if (cli_id >= MAX_CLIENTS)
 946			goto out;
 947		cli = &zcache_clients[cli_id];
 948		if (cli == NULL)
 949			goto out;
 950		atomic_inc(&cli->refcount);
 951	}
 952	if (poolid < MAX_POOLS_PER_CLIENT) {
 953		pool = cli->tmem_pools[poolid];
 954		if (pool != NULL)
 955			atomic_inc(&pool->refcount);
 956	}
 957out:
 958	return pool;
 959}
 960
 961static void zcache_put_pool(struct tmem_pool *pool)
 962{
 963	struct zcache_client *cli = NULL;
 964
 965	if (pool == NULL)
 966		BUG();
 967	cli = pool->client;
 968	atomic_dec(&pool->refcount);
 969	atomic_dec(&cli->refcount);
 970}
 971
 972int zcache_new_client(uint16_t cli_id)
 973{
 974	struct zcache_client *cli = NULL;
 975	int ret = -1;
 976
 977	if (cli_id == LOCAL_CLIENT)
 978		cli = &zcache_host;
 979	else if ((unsigned int)cli_id < MAX_CLIENTS)
 980		cli = &zcache_clients[cli_id];
 981	if (cli == NULL)
 982		goto out;
 983	if (cli->allocated)
 984		goto out;
 985	cli->allocated = 1;
 986#ifdef CONFIG_FRONTSWAP
 987	cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
 988	if (cli->zspool == NULL)
 989		goto out;
 990#endif
 991	ret = 0;
 992out:
 993	return ret;
 994}
 995
/* counters for debugging */
static unsigned long zcache_failed_get_free_pages;	/* __get_free_page() failures */
static unsigned long zcache_failed_alloc;		/* slab allocation failures */
static unsigned long zcache_put_to_flush;		/* puts converted into flushes */

/*
 * for now, used named slabs so can easily track usage; later can
 * either just use kmalloc, or perhaps add a slab-like allocator
 * to more carefully manage total memory utilization
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;
/* current and high-water counts of live tmem objs/objnodes */
static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_obj_count_max;
static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_objnode_count_max;

/*
 * to avoid memory allocation recursion (e.g. due to direct reclaim), we
 * preload all necessary data structures so the hostops callbacks never
 * actually do a malloc
 */
struct zcache_preload {
	void *page;		/* one raw pageframe */
	struct tmem_obj *obj;	/* one tmem object */
	int nr;			/* number of valid entries in objnodes[] */
	struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
};
static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
1025
/*
 * Fill this cpu's preload stash with enough objnodes, one tmem_obj and
 * one raw pageframe so that the hostops callbacks invoked by a
 * subsequent tmem_put() never need to allocate memory.
 *
 * Returns 0 with preemption DISABLED on success (the caller re-enables
 * it; see zcache_put_page()); returns -ENOMEM with preemption enabled
 * on failure.
 */
static int zcache_do_preload(struct tmem_pool *pool)
{
	struct zcache_preload *kp;
	struct tmem_objnode *objnode;
	struct tmem_obj *obj;
	void *page;
	int ret = -ENOMEM;

	if (unlikely(zcache_objnode_cache == NULL))
		goto out;
	if (unlikely(zcache_obj_cache == NULL))
		goto out;
	preempt_disable();
	kp = &__get_cpu_var(zcache_preloads);
	while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
		/* re-enable preemption across the allocation itself */
		preempt_enable_no_resched();
		objnode = kmem_cache_alloc(zcache_objnode_cache,
				ZCACHE_GFP_MASK);
		if (unlikely(objnode == NULL)) {
			zcache_failed_alloc++;
			goto out;
		}
		/* may have migrated cpus; refetch this cpu's stash */
		preempt_disable();
		kp = &__get_cpu_var(zcache_preloads);
		if (kp->nr < ARRAY_SIZE(kp->objnodes))
			kp->objnodes[kp->nr++] = objnode;
		else
			kmem_cache_free(zcache_objnode_cache, objnode);
	}
	preempt_enable_no_resched();
	obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
	if (unlikely(obj == NULL)) {
		zcache_failed_alloc++;
		goto out;
	}
	page = (void *)__get_free_page(ZCACHE_GFP_MASK);
	if (unlikely(page == NULL)) {
		zcache_failed_get_free_pages++;
		kmem_cache_free(zcache_obj_cache, obj);
		goto out;
	}
	/* install obj/page unless someone else filled the slot meanwhile */
	preempt_disable();
	kp = &__get_cpu_var(zcache_preloads);
	if (kp->obj == NULL)
		kp->obj = obj;
	else
		kmem_cache_free(zcache_obj_cache, obj);
	if (kp->page == NULL)
		kp->page = page;
	else
		free_page((unsigned long)page);
	ret = 0;
out:
	return ret;
}
1081
1082static void *zcache_get_free_page(void)
1083{
1084	struct zcache_preload *kp;
1085	void *page;
1086
1087	kp = &__get_cpu_var(zcache_preloads);
1088	page = kp->page;
1089	BUG_ON(page == NULL);
1090	kp->page = NULL;
1091	return page;
1092}
1093
/* Release a raw pageframe back to the page allocator. */
static void zcache_free_page(void *page)
{
	free_page((unsigned long)page);
}
1098
1099/*
1100 * zcache implementation for tmem host ops
1101 */
1102
1103static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
1104{
1105	struct tmem_objnode *objnode = NULL;
1106	unsigned long count;
1107	struct zcache_preload *kp;
1108
1109	kp = &__get_cpu_var(zcache_preloads);
1110	if (kp->nr <= 0)
1111		goto out;
1112	objnode = kp->objnodes[kp->nr - 1];
1113	BUG_ON(objnode == NULL);
1114	kp->objnodes[kp->nr - 1] = NULL;
1115	kp->nr--;
1116	count = atomic_inc_return(&zcache_curr_objnode_count);
1117	if (count > zcache_curr_objnode_count_max)
1118		zcache_curr_objnode_count_max = count;
1119out:
1120	return objnode;
1121}
1122
1123static void zcache_objnode_free(struct tmem_objnode *objnode,
1124					struct tmem_pool *pool)
1125{
1126	atomic_dec(&zcache_curr_objnode_count);
1127	BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
1128	kmem_cache_free(zcache_objnode_cache, objnode);
1129}
1130
1131static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
1132{
1133	struct tmem_obj *obj = NULL;
1134	unsigned long count;
1135	struct zcache_preload *kp;
1136
1137	kp = &__get_cpu_var(zcache_preloads);
1138	obj = kp->obj;
1139	BUG_ON(obj == NULL);
1140	kp->obj = NULL;
1141	count = atomic_inc_return(&zcache_curr_obj_count);
1142	if (count > zcache_curr_obj_count_max)
1143		zcache_curr_obj_count_max = count;
1144	return obj;
1145}
1146
1147static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
1148{
1149	atomic_dec(&zcache_curr_obj_count);
1150	BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
1151	kmem_cache_free(zcache_obj_cache, obj);
1152}
1153
/* allocation/free callbacks handed to the tmem core (see zcache_init) */
static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
1160
/*
 * zcache implementations for PAM page descriptor ops
 */

/* current/high-water counts of compressed pages, ephemeral vs persistent */
static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_eph_pampd_count_max;
static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_pers_pampd_count_max;

/* forward reference */
static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
1172
/*
 * Create a pampd holding a compressed copy of the page at @data.
 * Ephemeral pages go into zbud; persistent pages go into zsmalloc,
 * subject to the zv_* size/count policy limits.  Returns the new pampd,
 * or NULL on failure or policy rejection.
 *
 * NOTE: zcache_compress() returns 1 on success and 0 on failure, hence
 * the "ret == 0" failure tests below.
 */
static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
				struct tmem_pool *pool, struct tmem_oid *oid,
				 uint32_t index)
{
	void *pampd = NULL, *cdata;
	unsigned clen;
	int ret;
	unsigned long count;
	struct page *page = (struct page *)(data);
	struct zcache_client *cli = pool->client;
	uint16_t client_id = get_client_id_from_client(cli);
	unsigned long zv_mean_zsize;
	unsigned long curr_pers_pampd_count;
	u64 total_zsize;

	if (eph) {
		ret = zcache_compress(page, &cdata, &clen);
		if (ret == 0)
			goto out;
		/* zbud can only hold chunks up to half a pageframe */
		if (clen == 0 || clen > zbud_max_buddy_size()) {
			zcache_compress_poor++;
			goto out;
		}
		pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
						index, page, cdata, clen);
		if (pampd != NULL) {
			count = atomic_inc_return(&zcache_curr_eph_pampd_count);
			if (count > zcache_curr_eph_pampd_count_max)
				zcache_curr_eph_pampd_count_max = count;
		}
	} else {
		/* enforce the global cap on persistent (swap) pages */
		curr_pers_pampd_count =
			atomic_read(&zcache_curr_pers_pampd_count);
		if (curr_pers_pampd_count >
		    (zv_page_count_policy_percent * totalram_pages) / 100)
			goto out;
		ret = zcache_compress(page, &cdata, &clen);
		if (ret == 0)
			goto out;
		/* reject if compression is too poor */
		if (clen > zv_max_zsize) {
			zcache_compress_poor++;
			goto out;
		}
		/* reject if mean compression is too poor */
		if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
			total_zsize = zs_get_total_size_bytes(cli->zspool);
			zv_mean_zsize = div_u64(total_zsize,
						curr_pers_pampd_count);
			if (zv_mean_zsize > zv_max_mean_zsize) {
				zcache_mean_compress_poor++;
				goto out;
			}
		}
		pampd = (void *)zv_create(cli->zspool, pool->pool_id,
						oid, index, cdata, clen);
		if (pampd == NULL)
			goto out;
		count = atomic_inc_return(&zcache_curr_pers_pampd_count);
		if (count > zcache_curr_pers_pampd_count_max)
			zcache_curr_pers_pampd_count_max = count;
	}
out:
	return pampd;
}
1238
1239/*
1240 * fill the pageframe corresponding to the struct page with the data
1241 * from the passed pampd
1242 */
1243static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1244					void *pampd, struct tmem_pool *pool,
1245					struct tmem_oid *oid, uint32_t index)
1246{
1247	int ret = 0;
1248
1249	BUG_ON(is_ephemeral(pool));
1250	zv_decompress((struct page *)(data), pampd);
1251	return ret;
1252}
1253
1254/*
1255 * fill the pageframe corresponding to the struct page with the data
1256 * from the passed pampd
1257 */
1258static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1259					void *pampd, struct tmem_pool *pool,
1260					struct tmem_oid *oid, uint32_t index)
1261{
1262	BUG_ON(!is_ephemeral(pool));
1263	if (zbud_decompress((struct page *)(data), pampd) < 0)
1264		return -EINVAL;
1265	zbud_free_and_delist((struct zbud_hdr *)pampd);
1266	atomic_dec(&zcache_curr_eph_pampd_count);
1267	return 0;
1268}
1269
1270/*
1271 * free the pampd and remove it from any zcache lists
1272 * pampd must no longer be pointed to from any tmem data structures!
1273 */
1274static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
1275				struct tmem_oid *oid, uint32_t index)
1276{
1277	struct zcache_client *cli = pool->client;
1278
1279	if (is_ephemeral(pool)) {
1280		zbud_free_and_delist((struct zbud_hdr *)pampd);
1281		atomic_dec(&zcache_curr_eph_pampd_count);
1282		BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1283	} else {
1284		zv_free(cli->zspool, pampd);
1285		atomic_dec(&zcache_curr_pers_pampd_count);
1286		BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1287	}
1288}
1289
/* no per-object pampd state to tear down in this host */
static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
{
}
1293
/* no per-object pampd state to initialize in this host */
static void zcache_pampd_new_obj(struct tmem_obj *obj)
{
}
1297
/* in-place pampd replacement is not supported by this host */
static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
{
	return -1;
}
1302
1303static bool zcache_pampd_is_remote(void *pampd)
1304{
1305	return 0;
1306}
1307
/* page-accessible-memory (PAM) callbacks handed to the tmem core */
static struct tmem_pamops zcache_pamops = {
	.create = zcache_pampd_create,
	.get_data = zcache_pampd_get_data,
	.get_data_and_free = zcache_pampd_get_data_and_free,
	.free = zcache_pampd_free,
	.free_obj = zcache_pampd_free_obj,
	.new_obj = zcache_pampd_new_obj,
	.replace_in_obj = zcache_pampd_replace_in_obj,
	.is_remote = zcache_pampd_is_remote,
};
1318
/*
 * zcache compression/decompression and related per-cpu stuff
 */

/* per-cpu destination buffer for compression: 2^ORDER contiguous pages */
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
#define ZCACHE_DSTMEM_ORDER 1
1325
/*
 * Compress the pageframe @from into this cpu's dstmem buffer, setting
 * *out_va/*out_len on success.  Returns 1 on success, 0 if no per-cpu
 * buffer exists.  Callers must have irqs disabled so the per-cpu
 * dstmem is not reused underneath us.
 */
static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
	int ret = 0;
	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
	char *from_va;

	BUG_ON(!irqs_disabled());
	if (unlikely(dmem == NULL))
		goto out;  /* no buffer or no compressor so can't compress */
	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
	from_va = kmap_atomic(from);
	mb();	/* NOTE(review): barrier purpose is undocumented here —
		 * presumably ordering around the atomic mapping; confirm */
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
				out_len);
	BUG_ON(ret);	/* the compression op itself must not fail */
	*out_va = dmem;
	kunmap_atomic(from_va);
	ret = 1;
out:
	return ret;
}
1347
1348static int zcache_comp_cpu_up(int cpu)
1349{
1350	struct crypto_comp *tfm;
1351
1352	tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
1353	if (IS_ERR(tfm))
1354		return NOTIFY_BAD;
1355	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
1356	return NOTIFY_OK;
1357}
1358
1359static void zcache_comp_cpu_down(int cpu)
1360{
1361	struct crypto_comp *tfm;
1362
1363	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
1364	crypto_free_comp(tfm);
1365	*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
1366}
1367
/*
 * CPU hotplug callback: on bringup, allocate the per-cpu compression
 * transform and dstmem buffer; on death/cancel, free them and drain
 * the cpu's preload stash back to the slabs.
 */
static int zcache_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct zcache_preload *kp;

	switch (action) {
	case CPU_UP_PREPARE:
		ret = zcache_comp_cpu_up(cpu);
		if (ret != NOTIFY_OK) {
			pr_err("zcache: can't allocate compressor transform\n");
			return ret;
		}
		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		zcache_comp_cpu_down(cpu);
		free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
			ZCACHE_DSTMEM_ORDER);
		per_cpu(zcache_dstmem, cpu) = NULL;
		kp = &per_cpu(zcache_preloads, cpu);
		/* return any preloaded objnodes/obj/page to their pools */
		while (kp->nr) {
			kmem_cache_free(zcache_objnode_cache,
					kp->objnodes[kp->nr - 1]);
			kp->objnodes[kp->nr - 1] = NULL;
			kp->nr--;
		}
		if (kp->obj) {
			kmem_cache_free(zcache_obj_cache, kp->obj);
			kp->obj = NULL;
		}
		if (kp->page) {
			free_page((unsigned long)kp->page);
			kp->page = NULL;
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
1411
/* hotplug hook for the per-cpu buffers, transforms and preloads above */
static struct notifier_block zcache_cpu_notifier_block = {
	.notifier_call = zcache_cpu_notifier
};
1415
1416#ifdef CONFIG_SYSFS
/* read-only sysfs attribute backed by an unsigned long counter */
#define ZCACHE_SYSFS_RO(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%lu\n", zcache_##_name); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

/* as above, but backed by an atomic_t counter */
#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
	    return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

/* as above, but delegating the formatting to a custom _func(buf) */
#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
	    return _func(buf); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}
1449
/* high-water marks for tmem objects/objnodes */
ZCACHE_SYSFS_RO(curr_obj_count_max);
ZCACHE_SYSFS_RO(curr_objnode_count_max);
/* flush-page / flush-object statistics */
ZCACHE_SYSFS_RO(flush_total);
ZCACHE_SYSFS_RO(flush_found);
ZCACHE_SYSFS_RO(flobj_total);
ZCACHE_SYSFS_RO(flobj_found);
/* put failures, by pool type */
ZCACHE_SYSFS_RO(failed_eph_puts);
ZCACHE_SYSFS_RO(failed_pers_puts);
/* zbud (ephemeral) allocator statistics */
ZCACHE_SYSFS_RO(zbud_curr_zbytes);
ZCACHE_SYSFS_RO(zbud_cumul_zpages);
ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
ZCACHE_SYSFS_RO(zbud_buddied_count);
ZCACHE_SYSFS_RO(zbpg_unused_list_count);
/* shrinker/eviction statistics */
ZCACHE_SYSFS_RO(evicted_raw_pages);
ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
ZCACHE_SYSFS_RO(evicted_buddied_pages);
/* allocation failures and put-to-flush conversions */
ZCACHE_SYSFS_RO(failed_get_free_pages);
ZCACHE_SYSFS_RO(failed_alloc);
ZCACHE_SYSFS_RO(put_to_flush);
ZCACHE_SYSFS_RO(compress_poor);
ZCACHE_SYSFS_RO(mean_compress_poor);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
/* multi-value distributions rendered by helpers in zbud/zv code */
ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
			zbud_show_unbuddied_list_counts);
ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
			zbud_show_cumul_chunk_counts);
ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
			zv_curr_dist_counts_show);
ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
			zv_cumul_dist_counts_show);
1483
/* every attribute exposed under /sys/kernel/mm/zcache */
static struct attribute *zcache_attrs[] = {
	&zcache_curr_obj_count_attr.attr,
	&zcache_curr_obj_count_max_attr.attr,
	&zcache_curr_objnode_count_attr.attr,
	&zcache_curr_objnode_count_max_attr.attr,
	&zcache_flush_total_attr.attr,
	&zcache_flobj_total_attr.attr,
	&zcache_flush_found_attr.attr,
	&zcache_flobj_found_attr.attr,
	&zcache_failed_eph_puts_attr.attr,
	&zcache_failed_pers_puts_attr.attr,
	&zcache_compress_poor_attr.attr,
	&zcache_mean_compress_poor_attr.attr,
	&zcache_zbud_curr_raw_pages_attr.attr,
	&zcache_zbud_curr_zpages_attr.attr,
	&zcache_zbud_curr_zbytes_attr.attr,
	&zcache_zbud_cumul_zpages_attr.attr,
	&zcache_zbud_cumul_zbytes_attr.attr,
	&zcache_zbud_buddied_count_attr.attr,
	&zcache_zbpg_unused_list_count_attr.attr,
	&zcache_evicted_raw_pages_attr.attr,
	&zcache_evicted_unbuddied_pages_attr.attr,
	&zcache_evicted_buddied_pages_attr.attr,
	&zcache_failed_get_free_pages_attr.attr,
	&zcache_failed_alloc_attr.attr,
	&zcache_put_to_flush_attr.attr,
	&zcache_zbud_unbuddied_list_counts_attr.attr,
	&zcache_zbud_cumul_chunk_counts_attr.attr,
	&zcache_zv_curr_dist_counts_attr.attr,
	&zcache_zv_cumul_dist_counts_attr.attr,
	/* zv_* policy knobs are defined elsewhere in this file */
	&zcache_zv_max_zsize_attr.attr,
	&zcache_zv_max_mean_zsize_attr.attr,
	&zcache_zv_page_count_policy_percent_attr.attr,
	NULL,
};

static struct attribute_group zcache_attr_group = {
	.attrs = zcache_attrs,
	.name = "zcache",
};
1524
1525#endif /* CONFIG_SYSFS */
1526/*
1527 * When zcache is disabled ("frozen"), pools can be created and destroyed,
1528 * but all puts (and thus all other operations that require memory allocation)
1529 * must fail.  If zcache is unfrozen, accepts puts, then frozen again,
1530 * data consistency requires all puts while frozen to be converted into
1531 * flushes.
1532 */
static bool zcache_freeze;	/* while true, puts are converted to flushes */
1534
1535/*
1536 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
1537 */
1538static int shrink_zcache_memory(struct shrinker *shrink,
1539				struct shrink_control *sc)
1540{
1541	int ret = -1;
1542	int nr = sc->nr_to_scan;
1543	gfp_t gfp_mask = sc->gfp_mask;
1544
1545	if (nr >= 0) {
1546		if (!(gfp_mask & __GFP_FS))
1547			/* does this case really need to be skipped? */
1548			goto out;
1549		zbud_evict_pages(nr);
1550	}
1551	ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
1552out:
1553	return ret;
1554}
1555
/* registered from zcache_init() when cleancache is in use */
static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
1560
1561/*
1562 * zcache shims between cleancache/frontswap ops and tmem
1563 */
1564
/*
 * Put a page into tmem on behalf of cleancache/frontswap.  Caller must
 * have irqs disabled.  When zcache is frozen or preload fails, the put
 * is converted into a flush of any stale copy to keep tmem coherent.
 * Returns 0 on success, < 0 otherwise.
 */
static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
				uint32_t index, struct page *page)
{
	struct tmem_pool *pool;
	int ret = -1;

	BUG_ON(!irqs_disabled());
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (unlikely(pool == NULL))
		goto out;
	if (!zcache_freeze && zcache_do_preload(pool) == 0) {
		/* preload does preempt_disable on success */
		ret = tmem_put(pool, oidp, index, (char *)(page),
				PAGE_SIZE, 0, is_ephemeral(pool));
		if (ret < 0) {
			if (is_ephemeral(pool))
				zcache_failed_eph_puts++;
			else
				zcache_failed_pers_puts++;
		}
		zcache_put_pool(pool);
		preempt_enable_no_resched();
	} else {
		zcache_put_to_flush++;
		if (atomic_read(&pool->obj_count) > 0)
			/* the put fails whether the flush succeeds or not */
			(void)tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
out:
	return ret;
}
1597
1598static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1599				uint32_t index, struct page *page)
1600{
1601	struct tmem_pool *pool;
1602	int ret = -1;
1603	unsigned long flags;
1604	size_t size = PAGE_SIZE;
1605
1606	local_irq_save(flags);
1607	pool = zcache_get_pool_by_id(cli_id, pool_id);
1608	if (likely(pool != NULL)) {
1609		if (atomic_read(&pool->obj_count) > 0)
1610			ret = tmem_get(pool, oidp, index, (char *)(page),
1611					&size, 0, is_ephemeral(pool));
1612		zcache_put_pool(pool);
1613	}
1614	local_irq_restore(flags);
1615	return ret;
1616}
1617
1618static int zcache_flush_page(int cli_id, int pool_id,
1619				struct tmem_oid *oidp, uint32_t index)
1620{
1621	struct tmem_pool *pool;
1622	int ret = -1;
1623	unsigned long flags;
1624
1625	local_irq_save(flags);
1626	zcache_flush_total++;
1627	pool = zcache_get_pool_by_id(cli_id, pool_id);
1628	if (likely(pool != NULL)) {
1629		if (atomic_read(&pool->obj_count) > 0)
1630			ret = tmem_flush_page(pool, oidp, index);
1631		zcache_put_pool(pool);
1632	}
1633	if (ret >= 0)
1634		zcache_flush_found++;
1635	local_irq_restore(flags);
1636	return ret;
1637}
1638
1639static int zcache_flush_object(int cli_id, int pool_id,
1640				struct tmem_oid *oidp)
1641{
1642	struct tmem_pool *pool;
1643	int ret = -1;
1644	unsigned long flags;
1645
1646	local_irq_save(flags);
1647	zcache_flobj_total++;
1648	pool = zcache_get_pool_by_id(cli_id, pool_id);
1649	if (likely(pool != NULL)) {
1650		if (atomic_read(&pool->obj_count) > 0)
1651			ret = tmem_flush_object(pool, oidp);
1652		zcache_put_pool(pool);
1653	}
1654	if (ret >= 0)
1655		zcache_flobj_found++;
1656	local_irq_restore(flags);
1657	return ret;
1658}
1659
1660static int zcache_destroy_pool(int cli_id, int pool_id)
1661{
1662	struct tmem_pool *pool = NULL;
1663	struct zcache_client *cli = NULL;
1664	int ret = -1;
1665
1666	if (pool_id < 0)
1667		goto out;
1668	if (cli_id == LOCAL_CLIENT)
1669		cli = &zcache_host;
1670	else if ((unsigned int)cli_id < MAX_CLIENTS)
1671		cli = &zcache_clients[cli_id];
1672	if (cli == NULL)
1673		goto out;
1674	atomic_inc(&cli->refcount);
1675	pool = cli->tmem_pools[pool_id];
1676	if (pool == NULL)
1677		goto out;
1678	cli->tmem_pools[pool_id] = NULL;
1679	/* wait for pool activity on other cpus to quiesce */
1680	while (atomic_read(&pool->refcount) != 0)
1681		;
1682	atomic_dec(&cli->refcount);
1683	local_bh_disable();
1684	ret = tmem_destroy_pool(pool);
1685	local_bh_enable();
1686	kfree(pool);
1687	pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
1688			pool_id, cli_id);
1689out:
1690	return ret;
1691}
1692
/*
 * Create a new tmem pool for client @cli_id; @flags may include
 * TMEM_POOL_PERSIST.  Returns the new pool id, or -1 on bad client id,
 * allocation failure, or pool-slot exhaustion.  The client refcount is
 * held only for the duration of the call.
 */
static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
	int poolid = -1;
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	atomic_inc(&cli->refcount);
	pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
	if (pool == NULL) {
		pr_info("zcache: pool creation failed: out of memory\n");
		goto out;
	}

	/* find the first unused pool slot for this client */
	for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
		if (cli->tmem_pools[poolid] == NULL)
			break;
	if (poolid >= MAX_POOLS_PER_CLIENT) {
		pr_info("zcache: pool creation failed: max exceeded\n");
		kfree(pool);
		poolid = -1;
		goto out;
	}
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = poolid;
	tmem_new_pool(pool, flags);
	cli->tmem_pools[poolid] = pool;
	pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
		flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
		poolid, cli_id);
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return poolid;
}
1734
1735/**********
1736 * Two kernel functionalities currently can be layered on top of tmem.
1737 * These are "cleancache" which is used as a second-chance cache for clean
1738 * page cache pages; and "frontswap" which is used for swap pages
1739 * to avoid writes to disk.  A generic "shim" is provided here for each
1740 * to translate in-kernel semantics to zcache semantics.
1741 */
1742
1743#ifdef CONFIG_CLEANCACHE
1744static void zcache_cleancache_put_page(int pool_id,
1745					struct cleancache_filekey key,
1746					pgoff_t index, struct page *page)
1747{
1748	u32 ind = (u32) index;
1749	struct tmem_oid oid = *(struct tmem_oid *)&key;
1750
1751	if (likely(ind == index))
1752		(void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
1753}
1754
1755static int zcache_cleancache_get_page(int pool_id,
1756					struct cleancache_filekey key,
1757					pgoff_t index, struct page *page)
1758{
1759	u32 ind = (u32) index;
1760	struct tmem_oid oid = *(struct tmem_oid *)&key;
1761	int ret = -1;
1762
1763	if (likely(ind == index))
1764		ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
1765	return ret;
1766}
1767
1768static void zcache_cleancache_flush_page(int pool_id,
1769					struct cleancache_filekey key,
1770					pgoff_t index)
1771{
1772	u32 ind = (u32) index;
1773	struct tmem_oid oid = *(struct tmem_oid *)&key;
1774
1775	if (likely(ind == index))
1776		(void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
1777}
1778
1779static void zcache_cleancache_flush_inode(int pool_id,
1780					struct cleancache_filekey key)
1781{
1782	struct tmem_oid oid = *(struct tmem_oid *)&key;
1783
1784	(void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
1785}
1786
1787static void zcache_cleancache_flush_fs(int pool_id)
1788{
1789	if (pool_id >= 0)
1790		(void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
1791}
1792
/* cleancache init_fs: create an ephemeral tmem pool for a mounted fs */
static int zcache_cleancache_init_fs(size_t pagesize)
{
	BUG_ON(sizeof(struct cleancache_filekey) !=
				sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_new_pool(LOCAL_CLIENT, 0);
}
1800
/* cleancache init_shared_fs: @uuid is ignored (see comment below) */
static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	/* shared pools are unsupported and map to private */
	BUG_ON(sizeof(struct cleancache_filekey) !=
				sizeof(struct tmem_oid));
	BUG_ON(pagesize != PAGE_SIZE);
	return zcache_new_pool(LOCAL_CLIENT, 0);
}
1809
/* shim vector registered with the cleancache layer */
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.invalidate_page = zcache_cleancache_flush_page,
	.invalidate_inode = zcache_cleancache_flush_inode,
	.invalidate_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};
1819
1820struct cleancache_ops zcache_cleancache_register_ops(void)
1821{
1822	struct cleancache_ops old_ops =
1823		cleancache_register_ops(&zcache_cleancache_ops);
1824
1825	return old_ops;
1826}
1827#endif
1828
#ifdef CONFIG_FRONTSWAP
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid = -1;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
 * frontswap_load(), but has side-effects. Hence using 8.
 */
#define SWIZ_BITS		8
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
/* object id: swap type in the high bits, low SWIZ_BITS of the index below */
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
/* index within the object: the remaining high bits of the page index */
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
1843
1844static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1845{
1846	struct tmem_oid oid = { .oid = { 0 } };
1847	oid.oid[0] = _oswiz(type, ind);
1848	return oid;
1849}
1850
1851static int zcache_frontswap_store(unsigned type, pgoff_t offset,
1852				   struct page *page)
1853{
1854	u64 ind64 = (u64)offset;
1855	u32 ind = (u32)offset;
1856	struct tmem_oid oid = oswiz(type, ind);
1857	int ret = -1;
1858	unsigned long flags;
1859
1860	BUG_ON(!PageLocked(page));
1861	if (likely(ind64 == ind)) {
1862		local_irq_save(flags);
1863		ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1864					&oid, iswiz(ind), page);
1865		local_irq_restore(flags);
1866	}
1867	return ret;
1868}
1869
1870/* returns 0 if the page was successfully gotten from frontswap, -1 if
1871 * was not present (should never happen!) */
1872static int zcache_frontswap_load(unsigned type, pgoff_t offset,
1873				   struct page *page)
1874{
1875	u64 ind64 = (u64)offset;
1876	u32 ind = (u32)offset;
1877	struct tmem_oid oid = oswiz(type, ind);
1878	int ret = -1;
1879
1880	BUG_ON(!PageLocked(page));
1881	if (likely(ind64 == ind))
1882		ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1883					&oid, iswiz(ind), page);
1884	return ret;
1885}
1886
1887/* flush a single page from frontswap */
1888static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
1889{
1890	u64 ind64 = (u64)offset;
1891	u32 ind = (u32)offset;
1892	struct tmem_oid oid = oswiz(type, ind);
1893
1894	if (likely(ind64 == ind))
1895		(void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1896					&oid, iswiz(ind));
1897}
1898
1899/* flush all pages from the passed swaptype */
1900static void zcache_frontswap_flush_area(unsigned type)
1901{
1902	struct tmem_oid oid;
1903	int ind;
1904
1905	for (ind = SWIZ_MASK; ind >= 0; ind--) {
1906		oid = oswiz(type, ind);
1907		(void)zcache_flush_object(LOCAL_CLIENT,
1908						zcache_frontswap_poolid, &oid);
1909	}
1910}
1911
1912static void zcache_frontswap_init(unsigned ignored)
1913{
1914	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1915	if (zcache_frontswap_poolid < 0)
1916		zcache_frontswap_poolid =
1917			zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
1918}
1919
/* shim vector registered with the frontswap layer */
static struct frontswap_ops zcache_frontswap_ops = {
	.store = zcache_frontswap_store,
	.load = zcache_frontswap_load,
	.invalidate_page = zcache_frontswap_flush_page,
	.invalidate_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};
1927
1928struct frontswap_ops zcache_frontswap_register_ops(void)
1929{
1930	struct frontswap_ops old_ops =
1931		frontswap_register_ops(&zcache_frontswap_ops);
1932
1933	return old_ops;
1934}
1935#endif
1936
1937/*
1938 * zcache initialization
1939 * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
1940 * NOTHING HAPPENS!
1941 */
1942
/* set by the "zcache" or "zcache=<alg>" boot parameters; off by default */
static int zcache_enabled;

/* "zcache" boot parameter: enable zcache with the default compressor */
static int __init enable_zcache(char *s)
{
	zcache_enabled = 1;
	return 1;
}
__setup("zcache", enable_zcache);
1951
/* allow independent dynamic disabling of cleancache and frontswap */

/* "nocleancache" boot parameter: keep the cleancache shim unregistered */
static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
	use_cleancache = 0;
	return 1;
}

__setup("nocleancache", no_cleancache);

/* "nofrontswap" boot parameter: keep the frontswap shim unregistered */
static int use_frontswap = 1;

static int __init no_frontswap(char *s)
{
	use_frontswap = 0;
	return 1;
}

__setup("nofrontswap", no_frontswap);
1973
1974static int __init enable_zcache_compressor(char *s)
1975{
1976	strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
1977	zcache_enabled = 1;
1978	return 1;
1979}
1980__setup("zcache=", enable_zcache_compressor);
1981
1982
/*
 * Validate the requested crypto compression algorithm (falling back to
 * lzo when none was requested or the request is unsupported) and
 * allocate the per-cpu transform pointer array.  Returns 0 on success,
 * 1 on failure.
 */
static int zcache_comp_init(void)
{
	int ret = 0;

	/* check crypto algorithm */
	if (*zcache_comp_name != '\0') {
		ret = crypto_has_comp(zcache_comp_name, 0, 0);
		if (!ret)
			pr_info("zcache: %s not supported\n",
					zcache_comp_name);
	}
	/* nothing (valid) requested: fall back to lzo */
	if (!ret)
		strcpy(zcache_comp_name, "lzo");
	ret = crypto_has_comp(zcache_comp_name, 0, 0);
	if (!ret) {
		ret = 1;
		goto out;
	}
	pr_info("zcache: using %s compressor\n", zcache_comp_name);

	/* alloc percpu transforms */
	ret = 0;
	zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zcache_comp_pcpu_tfms)
		ret = 1;
out:
	return ret;
}
2011
/*
 * Module init: create the sysfs group, register the tmem host and PAM
 * ops, set up per-cpu state, slab caches and the local client, then
 * hook cleancache and/or frontswap if enabled on the command line.
 *
 * NOTE(review): the kmem_cache_create() results are not checked here;
 * a NULL cache is only detected later in zcache_do_preload().
 */
static int __init zcache_init(void)
{
	int ret = 0;

#ifdef CONFIG_SYSFS
	ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
	if (ret) {
		pr_err("zcache: can't create sysfs\n");
		goto out;
	}
#endif /* CONFIG_SYSFS */
#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
	if (zcache_enabled) {
		unsigned int cpu;

		tmem_register_hostops(&zcache_hostops);
		tmem_register_pamops(&zcache_pamops);
		ret = register_cpu_notifier(&zcache_cpu_notifier_block);
		if (ret) {
			pr_err("zcache: can't register cpu notifier\n");
			goto out;
		}
		ret = zcache_comp_init();
		if (ret) {
			pr_err("zcache: compressor initialization failed\n");
			goto out;
		}
		/* set up per-cpu state for cpus already online */
		for_each_online_cpu(cpu) {
			void *pcpu = (void *)(long)cpu;
			zcache_cpu_notifier(&zcache_cpu_notifier_block,
				CPU_UP_PREPARE, pcpu);
		}
	}
	zcache_objnode_cache = kmem_cache_create("zcache_objnode",
				sizeof(struct tmem_objnode), 0, 0, NULL);
	zcache_obj_cache = kmem_cache_create("zcache_obj",
				sizeof(struct tmem_obj), 0, 0, NULL);
	ret = zcache_new_client(LOCAL_CLIENT);
	if (ret) {
		pr_err("zcache: can't create client\n");
		goto out;
	}
#endif
#ifdef CONFIG_CLEANCACHE
	if (zcache_enabled && use_cleancache) {
		struct cleancache_ops old_ops;

		zbud_init();
		register_shrinker(&zcache_shrinker);
		old_ops = zcache_cleancache_register_ops();
		pr_info("zcache: cleancache enabled using kernel "
			"transcendent memory and compression buddies\n");
		if (old_ops.init_fs != NULL)
			pr_warning("zcache: cleancache_ops overridden");
	}
#endif
#ifdef CONFIG_FRONTSWAP
	if (zcache_enabled && use_frontswap) {
		struct frontswap_ops old_ops;

		old_ops = zcache_frontswap_register_ops();
		pr_info("zcache: frontswap enabled using kernel "
			"transcendent memory and zsmalloc\n");
		if (old_ops.init != NULL)
			pr_warning("zcache: frontswap_ops overridden");
	}
#endif
out:
	return ret;
}

module_init(zcache_init)