/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special-purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher-density approach (with no requirement of an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
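
/*
 * On-page layout implied by the code below (chunk numbering assumes the
 * common PAGE_SIZE = 4096, NCHUNKS_ORDER = 6 case: 64 chunks of 64 bytes):
 *
 *	chunks 0 .. ZHDR_CHUNKS-1	z3fold_header (page metadata)
 *	chunks ZHDR_CHUNKS ..		FIRST buddy, right after the header
 *	chunks start_middle ..		MIDDLE buddy
 *	.. chunk 63			LAST buddy, packed against page end
 *
 * A HEADLESS page has no z3fold_header at all and stores one large object
 * starting at offset 0.
 */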

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @pool:		pointer to the pool which this page belongs to
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the offset (in chunks) at which the middle buddy starts
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there will accordingly
 * be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
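
/*
 * Worked example, assuming PAGE_SIZE = 4096 (PAGE_SHIFT = 12):
 *
 *	CHUNK_SHIFT	= 12 - 6 = 6
 *	CHUNK_SIZE	= 1 << 6 = 64 bytes
 *	TOTAL_CHUNKS	= 4096 >> 6 = 64
 *
 * If sizeof(struct z3fold_header) <= 64, ZHDR_SIZE_ALIGNED = 64, so
 * ZHDR_CHUNKS = 1 and NCHUNKS = (4096 - 64) >> 6 = 63; a header that
 * spills into a second chunk (e.g. with spinlock debugging) gives
 * ZHDR_CHUNKS = 2 and NCHUNKS = 62.
 */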

#define BUDDY_MASK	(0x3)

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain
 *		fewer than three buddies; the list a z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driving this pool
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	UNDER_RECLAIM
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
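
/*
 * E.g. with CHUNK_SIZE = 64: size_to_chunks(1) = size_to_chunks(64) = 1
 * and size_to_chunks(65) = 2; sizes are rounded up to whole chunks.
 */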

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
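
/*
 * E.g. for_each_unbuddied_list(i, 3) walks i = 3 .. NCHUNKS - 1, i.e.
 * every freelist whose pages have at least 3 free chunks available.
 */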

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(UNDER_RECLAIM, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
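
/*
 * Worked round trip, assuming first_num == 3 and bud == FIRST (== 1):
 * encode_handle() stores (3 + 1) & 0x3 == 0 in the handle's low bits,
 * and handle_to_buddy() recovers (0 - 3) & 0x3 == 1 == FIRST, so the
 * buddy number survives the two-bit wraparound.
 */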

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr->pool;

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	spin_lock(&zhdr->pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&zhdr->pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
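
/*
 * Worked example, assuming TOTAL_CHUNKS = 64 and ZHDR_CHUNKS = 1: for a
 * page holding only a middle object with start_middle = 10 and
 * middle_chunks = 5, nfree_before = 10 - 1 = 9 and
 * nfree_after = 64 - (10 + 5) = 49, so num_free_chunks() returns 49,
 * the larger contiguous region.
 */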

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
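
/*
 * Worked example of the BIG_CHUNK_GAP heuristic, assuming ZHDR_CHUNKS = 1:
 * with first_chunks = 2 and start_middle = 10, the gap below the middle
 * object is 10 - (2 + 1) = 7 chunks, which is >= BIG_CHUNK_GAP, so the
 * middle object is moved down to chunk 3. A gap of only two chunks would
 * be left in place.
 */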

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr->pool;
	struct page *page;
	struct list_head *unbuddied;
	int fchunks;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	unbuddied = get_cpu_ptr(pool->unbuddied);
	fchunks = num_free_chunks(zhdr);
	if (fchunks < NCHUNKS &&
	    (!zhdr->first_chunks || !zhdr->middle_chunks ||
			!zhdr->last_chunks)) {
		/* the page's not completely free and it's unbuddied */
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[fchunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
	}
	put_cpu_ptr(pool->unbuddied);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		struct list_head *unbuddied;
		chunks = size_to_chunks(size);

lookup:
		/* First, try to find an unbuddied z3fold page. */
		unbuddied = get_cpu_ptr(pool->unbuddied);
		for_each_unbuddied_list(i, chunks) {
			struct list_head *l = &unbuddied[i];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr)
				continue;

			/* Re-check under lock. */
			spin_lock(&pool->lock);
			l = &unbuddied[i];
			if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
					struct z3fold_header, buddy)) ||
			    !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				put_cpu_ptr(pool->unbuddied);
				goto lookup;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				put_cpu_ptr(pool->unbuddied);
				if (can_sleep)
					cond_resched();
				goto lookup;
			}
			/*
			 * this page could not have been removed from its
			 * unbuddied list while the pool lock was held, and
			 * we've since taken the page lock, so kref_put could
			 * not have been called before we got here; it's
			 * therefore safe to just call kref_get()
			 */
			kref_get(&zhdr->refcount);
			break;
		}
		put_cpu_ptr(pool->unbuddied);

		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto lookup;
			}
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page, pool);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM flag being set on the page,
 * this function only sets the first|last_chunks to 0.  The page is actually
 * freed once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(UNDER_RECLAIM, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * candidate found; make sure zhdr points at
				 * this page (a HEADLESS handle is the page
				 * address itself) rather than at a leftover
				 * from a previous iteration
				 */
				zhdr = page_address(page);
				break;
			}

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr))
				continue; /* can't evict at this point */
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			set_bit(UNDER_RECLAIM, &page->private);
			break;
		}

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free, which will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(UNDER_RECLAIM, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock so we can add it
			 * back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off with the pool lock held, so take it again */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
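
/*
 * A minimal sketch of an eviction handler following the protocol that
 * z3fold_reclaim_page() documents above. This is illustrative only:
 * my_evict() and write_back() are hypothetical; the in-tree user of this
 * callback is zswap, reached through the zpool glue below.
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *data = z3fold_map(pool, handle);
 *		int err = write_back(data);	(hypothetical helper)
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;	(non-zero: page goes back on the LRU)
 *		z3fold_free(pool, handle);	(mandatory on success)
 *		return 0;
 *	}
 */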

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}
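
/*
 * E.g. mapping a LAST buddy with last_chunks = 4 in a 4K page returns
 * page_address + 4096 - (4 << 6) = page_address + 3840: the object is
 * packed up against the end of the page.
 */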

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
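
/*
 * Sketch of how this driver is reached from other kernel code, assuming
 * the zpool API of this kernel generation (zswap is the in-tree user;
 * my_zpool_ops and copy_data() are hypothetical):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zp && !zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		copy_data(dst, src, len);	(hypothetical helper)
 *		zpool_unmap_handle(zp, handle);
 *	}
 */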

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");