v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * z3fold.c
   4 *
   5 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
   6 * Copyright (C) 2016, Sony Mobile Communications Inc.
   7 *
   8 * This implementation is based on zbud written by Seth Jennings.
   9 *
   10 * z3fold is a special-purpose allocator for storing compressed pages. It
   11 * can store up to three compressed pages per page, which improves the
   12 * compression ratio of zbud while retaining its main concepts (e.g. always
   13 * storing an integral number of objects per page) and simplicity.
   14 * It still has simple and deterministic reclaim properties that make it
   15 * preferable to a higher density approach (with no requirement on an integral
   16 * number of objects per page) when reclaim is used.
  17 *
  18 * As in zbud, pages are divided into "chunks".  The size of the chunks is
  19 * fixed at compile time and is determined by NCHUNKS_ORDER below.
  20 *
  21 * z3fold doesn't export any API and is meant to be used via zpool API.
  22 */
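
/*
 * Minimal usage sketch through the zpool API (illustrative only: error
 * handling is omitted, and my_zpool_ops, src and len are hypothetical).
 * This is roughly how a user such as zswap drives z3fold:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	zpool_malloc(zp, len, GFP_KERNEL, &handle);
 *	memcpy(zpool_map_handle(zp, handle, ZPOOL_MM_WO), src, len);
 *	zpool_unmap_handle(zp, handle);
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */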
  23
  24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  25
  26#include <linux/atomic.h>
  27#include <linux/sched.h>
  28#include <linux/cpumask.h>
  29#include <linux/list.h>
  30#include <linux/mm.h>
  31#include <linux/module.h>
  32#include <linux/page-flags.h>
  33#include <linux/migrate.h>
  34#include <linux/node.h>
  35#include <linux/compaction.h>
  36#include <linux/percpu.h>
  37#include <linux/mount.h>
  38#include <linux/pseudo_fs.h>
  39#include <linux/fs.h>
  40#include <linux/preempt.h>
  41#include <linux/workqueue.h>
  42#include <linux/slab.h>
  43#include <linux/spinlock.h>
  44#include <linux/zpool.h>
  45#include <linux/magic.h>
  46
  47/*
  48 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
  49 * adjusting internal fragmentation.  It also determines the number of
  50 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
  51 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
  52 * in the beginning of an allocated page are occupied by z3fold header, so
   53 * NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
   54 * which is the maximum number of free chunks in a z3fold page; accordingly,
   55 * there will be 63 (or 62, respectively) freelists per pool.
  56 */
  57#define NCHUNKS_ORDER	6
  58
  59#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
  60#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
  61#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
  62#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
  63#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
  64#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
  65
  66#define BUDDY_MASK	(0x3)
  67#define BUDDY_SHIFT	2
  68#define SLOTS_ALIGN	(0x40)
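
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 and a z3fold
 * header that fits into a single chunk (no spinlock debugging):
 *
 *	CHUNK_SHIFT       == 12 - 6 == 6
 *	CHUNK_SIZE        == 1 << 6 == 64 bytes
 *	TOTAL_CHUNKS      == 4096 >> 6 == 64
 *	ZHDR_SIZE_ALIGNED == round_up(sizeof(struct z3fold_header), 64) == 64
 *	ZHDR_CHUNKS       == 1
 *	NCHUNKS           == (4096 - 64) >> 6 == 63
 *
 * so each buddy occupies between 1 and 63 chunks, and the pool keeps 63
 * per-CPU "unbuddied" freelists, one per possible free-chunk count.
 */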
  69
  70/*****************
  71 * Structures
  72*****************/
  73struct z3fold_pool;
  74struct z3fold_ops {
  75	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
  76};
  77
  78enum buddy {
  79	HEADLESS = 0,
  80	FIRST,
  81	MIDDLE,
  82	LAST,
  83	BUDDIES_MAX = LAST
  84};
  85
  86struct z3fold_buddy_slots {
  87	/*
  88	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
  89	 * be enough slots to hold all possible variants
  90	 */
  91	unsigned long slot[BUDDY_MASK + 1];
  92	unsigned long pool; /* back link + flags */
  93};
  94#define HANDLE_FLAG_MASK	(0x03)
  95
  96/*
  97 * struct z3fold_header - z3fold page metadata occupying first chunks of each
  98 *			z3fold page, except for HEADLESS pages
  99 * @buddy:		links the z3fold page into the relevant list in the
 100 *			pool
 101 * @page_lock:		per-page lock
 102 * @refcount:		reference count for the z3fold page
 103 * @work:		work_struct for page layout optimization
 104 * @slots:		pointer to the structure holding buddy slots
 105 * @pool:		pointer to the containing pool
 106 * @cpu:		CPU which this page "belongs" to
 107 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 108 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 109 * @last_chunks:	the size of the last buddy in chunks, 0 if free
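 * @start_middle:	index of the first chunk used by the middle buddy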
 110 * @first_num:		the starting number (for the first handle)
 111 * @mapped_count:	the number of objects currently mapped
 112 */
 113struct z3fold_header {
 114	struct list_head buddy;
 115	spinlock_t page_lock;
 116	struct kref refcount;
 117	struct work_struct work;
 118	struct z3fold_buddy_slots *slots;
 119	struct z3fold_pool *pool;
 120	short cpu;
 121	unsigned short first_chunks;
 122	unsigned short middle_chunks;
 123	unsigned short last_chunks;
 124	unsigned short start_middle;
 125	unsigned short first_num:2;
 126	unsigned short mapped_count:2;
 127};
 128
 129/**
 130 * struct z3fold_pool - stores metadata for each z3fold pool
 131 * @name:	pool name
 132 * @lock:	protects pool unbuddied/lru lists
 133 * @stale_lock:	protects pool stale page list
 134 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 135 *		buddies; the list each z3fold page is added to depends on
 136 *		the size of its free region.
 137 * @lru:	list tracking the z3fold pages in LRU order by most recently
 138 *		added buddy.
 139 * @stale:	list of pages marked for freeing
 140 * @pages_nr:	number of z3fold pages in the pool.
 141 * @c_handle:	cache for z3fold_buddy_slots allocation
 142 * @ops:	pointer to a structure of user defined operations specified at
 143 *		pool creation time.
 144 * @compact_wq:	workqueue for page layout background optimization
 145 * @release_wq:	workqueue for safe page release
 146 * @work:	work_struct for safe page release
 147 * @inode:	inode for z3fold pseudo filesystem
 148 *
 149 * This structure is allocated at pool creation time and maintains metadata
 150 * pertaining to a particular z3fold pool.
 151 */
 152struct z3fold_pool {
 153	const char *name;
 154	spinlock_t lock;
 155	spinlock_t stale_lock;
 156	struct list_head *unbuddied;
 157	struct list_head lru;
 158	struct list_head stale;
 159	atomic64_t pages_nr;
 160	struct kmem_cache *c_handle;
 161	const struct z3fold_ops *ops;
 162	struct zpool *zpool;
 163	const struct zpool_ops *zpool_ops;
 164	struct workqueue_struct *compact_wq;
 165	struct workqueue_struct *release_wq;
 166	struct work_struct work;
 167	struct inode *inode;
 168};
 169
 170/*
 171 * Internal z3fold page flags
 172 */
 173enum z3fold_page_flags {
 174	PAGE_HEADLESS = 0,
 175	MIDDLE_CHUNK_MAPPED,
 176	NEEDS_COMPACTING,
 177	PAGE_STALE,
 178	PAGE_CLAIMED, /* by either reclaim or free */
 179};
 180
 181/*****************
 182 * Helpers
 183*****************/
 184
 185/* Converts an allocation size in bytes to size in z3fold chunks */
 186static int size_to_chunks(size_t size)
 187{
 188	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
 189}
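
/* Worked example (illustrative): with 64-byte chunks,
 * size_to_chunks(100) == (100 + 63) >> 6 == 2.
 */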
 190
 191#define for_each_unbuddied_list(_iter, _begin) \
 192	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
 193
 194static void compact_page_work(struct work_struct *w);
 195
 196static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
 197							gfp_t gfp)
 198{
 199	struct z3fold_buddy_slots *slots;
 200
 201	slots = kmem_cache_alloc(pool->c_handle,
 202				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
 203
 204	if (slots) {
 205		memset(slots->slot, 0, sizeof(slots->slot));
 206		slots->pool = (unsigned long)pool;
 207	}
 208
 209	return slots;
 210}
 211
 212static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
 213{
 214	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
 215}
 216
 217static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
 218{
 219	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
 220}
 221
 222static inline void free_handle(unsigned long handle)
 223{
 224	struct z3fold_buddy_slots *slots;
 225	int i;
 226	bool is_free;
 227
 228	if (handle & (1 << PAGE_HEADLESS))
 229		return;
 230
 231	WARN_ON(*(unsigned long *)handle == 0);
 232	*(unsigned long *)handle = 0;
 233	slots = handle_to_slots(handle);
 234	is_free = true;
 235	for (i = 0; i <= BUDDY_MASK; i++) {
 236		if (slots->slot[i]) {
 237			is_free = false;
 238			break;
 239		}
 240	}
 241
 242	if (is_free) {
 243		struct z3fold_pool *pool = slots_to_pool(slots);
 244
 245		kmem_cache_free(pool->c_handle, slots);
 246	}
 247}
 248
 249static int z3fold_init_fs_context(struct fs_context *fc)
 250{
 251	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
 252}
 253
 254static struct file_system_type z3fold_fs = {
 255	.name		= "z3fold",
 256	.init_fs_context = z3fold_init_fs_context,
 257	.kill_sb	= kill_anon_super,
 258};
 259
 260static struct vfsmount *z3fold_mnt;
 261static int z3fold_mount(void)
 262{
 263	int ret = 0;
 264
 265	z3fold_mnt = kern_mount(&z3fold_fs);
 266	if (IS_ERR(z3fold_mnt))
 267		ret = PTR_ERR(z3fold_mnt);
 268
 269	return ret;
 270}
 271
 272static void z3fold_unmount(void)
 273{
 274	kern_unmount(z3fold_mnt);
 275}
 276
 277static const struct address_space_operations z3fold_aops;
 278static int z3fold_register_migration(struct z3fold_pool *pool)
 279{
 280	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
 281	if (IS_ERR(pool->inode)) {
 282		pool->inode = NULL;
 283		return 1;
 284	}
 285
 286	pool->inode->i_mapping->private_data = pool;
 287	pool->inode->i_mapping->a_ops = &z3fold_aops;
 288	return 0;
 289}
 290
 291static void z3fold_unregister_migration(struct z3fold_pool *pool)
 292{
 293	if (pool->inode)
 294		iput(pool->inode);
  295}
 296
 297/* Initializes the z3fold header of a newly allocated z3fold page */
 298static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
 299					struct z3fold_pool *pool, gfp_t gfp)
 300{
 301	struct z3fold_header *zhdr = page_address(page);
 302	struct z3fold_buddy_slots *slots;
 303
 304	INIT_LIST_HEAD(&page->lru);
 305	clear_bit(PAGE_HEADLESS, &page->private);
 306	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 307	clear_bit(NEEDS_COMPACTING, &page->private);
 308	clear_bit(PAGE_STALE, &page->private);
 309	clear_bit(PAGE_CLAIMED, &page->private);
 310	if (headless)
 311		return zhdr;
 312
 313	slots = alloc_slots(pool, gfp);
 314	if (!slots)
 315		return NULL;
 316
 317	spin_lock_init(&zhdr->page_lock);
 318	kref_init(&zhdr->refcount);
 319	zhdr->first_chunks = 0;
 320	zhdr->middle_chunks = 0;
 321	zhdr->last_chunks = 0;
 322	zhdr->first_num = 0;
 323	zhdr->start_middle = 0;
 324	zhdr->cpu = -1;
 325	zhdr->slots = slots;
 326	zhdr->pool = pool;
 327	INIT_LIST_HEAD(&zhdr->buddy);
 328	INIT_WORK(&zhdr->work, compact_page_work);
 329	return zhdr;
 330}
 331
 332/* Resets the struct page fields and frees the page */
 333static void free_z3fold_page(struct page *page, bool headless)
 334{
 335	if (!headless) {
 336		lock_page(page);
 337		__ClearPageMovable(page);
 338		unlock_page(page);
 339	}
 340	ClearPagePrivate(page);
 341	__free_page(page);
 342}
 343
 344/* Lock a z3fold page */
 345static inline void z3fold_page_lock(struct z3fold_header *zhdr)
 346{
 347	spin_lock(&zhdr->page_lock);
 348}
 349
 350/* Try to lock a z3fold page */
 351static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
 352{
 353	return spin_trylock(&zhdr->page_lock);
 354}
 355
 356/* Unlock a z3fold page */
 357static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 358{
 359	spin_unlock(&zhdr->page_lock);
 360}
 361
 362/* Helper function to build the index */
 363static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
 364{
 365	return (bud + zhdr->first_num) & BUDDY_MASK;
 366}
 367
 368/*
 369 * Encodes the handle of a particular buddy within a z3fold page
 370 * Pool lock should be held as this function accesses first_num
 371 */
 372static unsigned long __encode_handle(struct z3fold_header *zhdr,
 373				struct z3fold_buddy_slots *slots,
 374				enum buddy bud)
 375{
 376	unsigned long h = (unsigned long)zhdr;
 377	int idx = 0;
 378
 379	/*
 380	 * For a headless page, its handle is its pointer with the extra
 381	 * PAGE_HEADLESS bit set
 382	 */
 383	if (bud == HEADLESS)
 384		return h | (1 << PAGE_HEADLESS);
 385
 386	/* otherwise, return pointer to encoded handle */
 387	idx = __idx(zhdr, bud);
 388	h += idx;
 389	if (bud == LAST)
 390		h |= (zhdr->last_chunks << BUDDY_SHIFT);
 391
 392	slots->slot[idx] = h;
 393	return (unsigned long)&slots->slot[idx];
 394}
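
/*
 * Worked example (illustrative): with zhdr at 0xffff888012345000,
 * first_num == 0 and bud == LAST with last_chunks == 5, __idx() yields 3,
 * so slot[3] stores 0xffff888012345003 | (5 << BUDDY_SHIFT) and the
 * returned handle is the address of slot[3] itself; handle_to_chunks()
 * and handle_to_buddy() below recover 5 and LAST from that slot value.
 */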
 395
 396static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
 397{
 398	return __encode_handle(zhdr, zhdr->slots, bud);
 399}
 400
 401/* Returns the z3fold page where a given handle is stored */
 402static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
 403{
 404	unsigned long addr = h;
 405
 406	if (!(addr & (1 << PAGE_HEADLESS)))
 407		addr = *(unsigned long *)h;
 408
 409	return (struct z3fold_header *)(addr & PAGE_MASK);
 410}
 411
 412/* only for LAST bud, returns zero otherwise */
 413static unsigned short handle_to_chunks(unsigned long handle)
 414{
 415	unsigned long addr = *(unsigned long *)handle;
 416
 417	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
 418}
 419
 420/*
 421 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
  422 *  but that doesn't matter, because the masking will result in the
 423 *  correct buddy number.
 424 */
 425static enum buddy handle_to_buddy(unsigned long handle)
 426{
 427	struct z3fold_header *zhdr;
 428	unsigned long addr;
 429
 430	WARN_ON(handle & (1 << PAGE_HEADLESS));
 431	addr = *(unsigned long *)handle;
 432	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
 433	return (addr - zhdr->first_num) & BUDDY_MASK;
 434}
 435
 436static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
 437{
 438	return zhdr->pool;
 439}
 440
 441static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 442{
 443	struct page *page = virt_to_page(zhdr);
 444	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 445
 446	WARN_ON(!list_empty(&zhdr->buddy));
 447	set_bit(PAGE_STALE, &page->private);
 448	clear_bit(NEEDS_COMPACTING, &page->private);
 449	spin_lock(&pool->lock);
 450	if (!list_empty(&page->lru))
 451		list_del_init(&page->lru);
 452	spin_unlock(&pool->lock);
 453	if (locked)
 454		z3fold_page_unlock(zhdr);
 455	spin_lock(&pool->stale_lock);
 456	list_add(&zhdr->buddy, &pool->stale);
 457	queue_work(pool->release_wq, &pool->work);
 458	spin_unlock(&pool->stale_lock);
 459}
 460
 461static void __attribute__((__unused__))
 462			release_z3fold_page(struct kref *ref)
 463{
 464	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
 465						refcount);
 466	__release_z3fold_page(zhdr, false);
 467}
 468
 469static void release_z3fold_page_locked(struct kref *ref)
 470{
 471	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
 472						refcount);
 473	WARN_ON(z3fold_page_trylock(zhdr));
 474	__release_z3fold_page(zhdr, true);
 475}
 476
 477static void release_z3fold_page_locked_list(struct kref *ref)
 478{
 479	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
 480					       refcount);
 481	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 482	spin_lock(&pool->lock);
 483	list_del_init(&zhdr->buddy);
 484	spin_unlock(&pool->lock);
 485
 486	WARN_ON(z3fold_page_trylock(zhdr));
 487	__release_z3fold_page(zhdr, true);
 488}
 489
 490static void free_pages_work(struct work_struct *w)
 491{
 492	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
 493
 494	spin_lock(&pool->stale_lock);
 495	while (!list_empty(&pool->stale)) {
 496		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
 497						struct z3fold_header, buddy);
 498		struct page *page = virt_to_page(zhdr);
 499
 500		list_del(&zhdr->buddy);
 501		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
 502			continue;
 503		spin_unlock(&pool->stale_lock);
 504		cancel_work_sync(&zhdr->work);
 505		free_z3fold_page(page, false);
 506		cond_resched();
 507		spin_lock(&pool->stale_lock);
 508	}
 509	spin_unlock(&pool->stale_lock);
 510}
 511
 512/*
 513 * Returns the number of free chunks in a z3fold page.
 514 * NB: can't be used with HEADLESS pages.
 515 */
 516static int num_free_chunks(struct z3fold_header *zhdr)
 517{
 518	int nfree;
 519	/*
 520	 * If there is a middle object, pick up the bigger free space
 521	 * either before or after it. Otherwise just subtract the number
 522	 * of chunks occupied by the first and the last objects.
 523	 */
 524	if (zhdr->middle_chunks != 0) {
 525		int nfree_before = zhdr->first_chunks ?
 526			0 : zhdr->start_middle - ZHDR_CHUNKS;
 527		int nfree_after = zhdr->last_chunks ?
 528			0 : TOTAL_CHUNKS -
 529				(zhdr->start_middle + zhdr->middle_chunks);
 530		nfree = max(nfree_before, nfree_after);
 531	} else
 532		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
 533	return nfree;
 534}
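
/*
 * Worked example (illustrative, 64 total chunks): with first_chunks == 10,
 * middle_chunks == 8 at start_middle == 20 and last_chunks == 0,
 * nfree_before == 0 (the first buddy is in use) and
 * nfree_after == 64 - (20 + 8) == 36, so num_free_chunks() returns 36.
 */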
 535
 536/* Add to the appropriate unbuddied list */
 537static inline void add_to_unbuddied(struct z3fold_pool *pool,
 538				struct z3fold_header *zhdr)
 539{
 540	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
 541			zhdr->middle_chunks == 0) {
 542		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
 543
 544		int freechunks = num_free_chunks(zhdr);
 545		spin_lock(&pool->lock);
 546		list_add(&zhdr->buddy, &unbuddied[freechunks]);
 547		spin_unlock(&pool->lock);
 548		zhdr->cpu = smp_processor_id();
 549		put_cpu_ptr(pool->unbuddied);
 550	}
 551}
 552
 553static inline void *mchunk_memmove(struct z3fold_header *zhdr,
 554				unsigned short dst_chunk)
 555{
 556	void *beg = zhdr;
 557	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
 558		       beg + (zhdr->start_middle << CHUNK_SHIFT),
 559		       zhdr->middle_chunks << CHUNK_SHIFT);
 560}
 561
 562#define BIG_CHUNK_GAP	3
 563/* Has to be called with lock held */
 564static int z3fold_compact_page(struct z3fold_header *zhdr)
 565{
 566	struct page *page = virt_to_page(zhdr);
 567
 568	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
 569		return 0; /* can't move middle chunk, it's used */
 570
 571	if (unlikely(PageIsolated(page)))
 572		return 0;
 573
 574	if (zhdr->middle_chunks == 0)
 575		return 0; /* nothing to compact */
 576
 577	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
 578		/* move to the beginning */
 579		mchunk_memmove(zhdr, ZHDR_CHUNKS);
 580		zhdr->first_chunks = zhdr->middle_chunks;
 581		zhdr->middle_chunks = 0;
 582		zhdr->start_middle = 0;
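		/*
		 * Bumping first_num below rotates the handle index space,
		 * so the existing MIDDLE handle now decodes as FIRST in
		 * handle_to_buddy().
		 */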
 583		zhdr->first_num++;
 584		return 1;
 585	}
 586
 587	/*
 588	 * moving data is expensive, so let's only do that if
 589	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
 590	 */
 591	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
 592	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
 593			BIG_CHUNK_GAP) {
 594		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
 595		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
 596		return 1;
 597	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
 598		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
 599					+ zhdr->middle_chunks) >=
 600			BIG_CHUNK_GAP) {
 601		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
 602			zhdr->middle_chunks;
 603		mchunk_memmove(zhdr, new_start);
 604		zhdr->start_middle = new_start;
 605		return 1;
 606	}
 607
 608	return 0;
 609}
 610
 611static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 612{
 613	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 614	struct page *page;
 615
 616	page = virt_to_page(zhdr);
 617	if (locked)
 618		WARN_ON(z3fold_page_trylock(zhdr));
 619	else
 620		z3fold_page_lock(zhdr);
 621	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
 622		z3fold_page_unlock(zhdr);
 623		return;
 624	}
 625	spin_lock(&pool->lock);
 626	list_del_init(&zhdr->buddy);
 627	spin_unlock(&pool->lock);
 628
 629	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
 630		atomic64_dec(&pool->pages_nr);
 631		return;
 632	}
 633
 634	if (unlikely(PageIsolated(page) ||
 635		     test_bit(PAGE_CLAIMED, &page->private) ||
 636		     test_bit(PAGE_STALE, &page->private))) {
 637		z3fold_page_unlock(zhdr);
 638		return;
 639	}
 640
 641	z3fold_compact_page(zhdr);
 642	add_to_unbuddied(pool, zhdr);
 643	z3fold_page_unlock(zhdr);
 644}
 645
 646static void compact_page_work(struct work_struct *w)
 647{
 648	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
 649						work);
 650
 651	do_compact_page(zhdr, false);
 652}
 653
 654/* returns _locked_ z3fold page header or NULL */
 655static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 656						size_t size, bool can_sleep)
 657{
 658	struct z3fold_header *zhdr = NULL;
 659	struct page *page;
 660	struct list_head *unbuddied;
 661	int chunks = size_to_chunks(size), i;
 662
 663lookup:
 664	/* First, try to find an unbuddied z3fold page. */
 665	unbuddied = get_cpu_ptr(pool->unbuddied);
 666	for_each_unbuddied_list(i, chunks) {
 667		struct list_head *l = &unbuddied[i];
 668
 669		zhdr = list_first_entry_or_null(READ_ONCE(l),
 670					struct z3fold_header, buddy);
 671
 672		if (!zhdr)
 673			continue;
 674
 675		/* Re-check under lock. */
 676		spin_lock(&pool->lock);
 677		l = &unbuddied[i];
 678		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
 679						struct z3fold_header, buddy)) ||
 680		    !z3fold_page_trylock(zhdr)) {
 681			spin_unlock(&pool->lock);
 682			zhdr = NULL;
 683			put_cpu_ptr(pool->unbuddied);
 684			if (can_sleep)
 685				cond_resched();
 686			goto lookup;
 687		}
 688		list_del_init(&zhdr->buddy);
 689		zhdr->cpu = -1;
 690		spin_unlock(&pool->lock);
 691
 692		page = virt_to_page(zhdr);
 693		if (test_bit(NEEDS_COMPACTING, &page->private)) {
 694			z3fold_page_unlock(zhdr);
 695			zhdr = NULL;
 696			put_cpu_ptr(pool->unbuddied);
 697			if (can_sleep)
 698				cond_resched();
 699			goto lookup;
 700		}
 701
 702		/*
 703		 * this page could not be removed from its unbuddied
 704		 * list while pool lock was held, and then we've taken
 705		 * page lock so kref_put could not be called before
 706		 * we got here, so it's safe to just call kref_get()
 707		 */
 708		kref_get(&zhdr->refcount);
 709		break;
 710	}
 711	put_cpu_ptr(pool->unbuddied);
 712
 713	if (!zhdr) {
 714		int cpu;
 715
 716		/* look for _exact_ match on other cpus' lists */
 717		for_each_online_cpu(cpu) {
 718			struct list_head *l;
 719
 720			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
 721			spin_lock(&pool->lock);
 722			l = &unbuddied[chunks];
 723
 724			zhdr = list_first_entry_or_null(READ_ONCE(l),
 725						struct z3fold_header, buddy);
 726
 727			if (!zhdr || !z3fold_page_trylock(zhdr)) {
 728				spin_unlock(&pool->lock);
 729				zhdr = NULL;
 730				continue;
 731			}
 732			list_del_init(&zhdr->buddy);
 733			zhdr->cpu = -1;
 734			spin_unlock(&pool->lock);
 735
 736			page = virt_to_page(zhdr);
 737			if (test_bit(NEEDS_COMPACTING, &page->private)) {
 738				z3fold_page_unlock(zhdr);
 739				zhdr = NULL;
 740				if (can_sleep)
 741					cond_resched();
 742				continue;
 743			}
 744			kref_get(&zhdr->refcount);
 745			break;
 746		}
 747	}
 748
 749	return zhdr;
 750}
 751
 752/*
 753 * API Functions
 754 */
 755
 756/**
 757 * z3fold_create_pool() - create a new z3fold pool
 758 * @name:	pool name
 759 * @gfp:	gfp flags when allocating the z3fold pool structure
 760 * @ops:	user-defined operations for the z3fold pool
 761 *
 762 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 763 * failed.
 764 */
 765static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
 766		const struct z3fold_ops *ops)
 767{
 768	struct z3fold_pool *pool = NULL;
 769	int i, cpu;
 770
 771	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
 772	if (!pool)
 773		goto out;
 774	pool->c_handle = kmem_cache_create("z3fold_handle",
 775				sizeof(struct z3fold_buddy_slots),
 776				SLOTS_ALIGN, 0, NULL);
 777	if (!pool->c_handle)
 778		goto out_c;
 779	spin_lock_init(&pool->lock);
 780	spin_lock_init(&pool->stale_lock);
 781	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
 782	if (!pool->unbuddied)
 783		goto out_pool;
 784	for_each_possible_cpu(cpu) {
 785		struct list_head *unbuddied =
 786				per_cpu_ptr(pool->unbuddied, cpu);
 787		for_each_unbuddied_list(i, 0)
 788			INIT_LIST_HEAD(&unbuddied[i]);
 789	}
 790	INIT_LIST_HEAD(&pool->lru);
 791	INIT_LIST_HEAD(&pool->stale);
 792	atomic64_set(&pool->pages_nr, 0);
 793	pool->name = name;
 794	pool->compact_wq = create_singlethread_workqueue(pool->name);
 795	if (!pool->compact_wq)
 796		goto out_unbuddied;
 797	pool->release_wq = create_singlethread_workqueue(pool->name);
 798	if (!pool->release_wq)
 799		goto out_wq;
 800	if (z3fold_register_migration(pool))
 801		goto out_rwq;
 802	INIT_WORK(&pool->work, free_pages_work);
 803	pool->ops = ops;
 804	return pool;
 805
 806out_rwq:
 807	destroy_workqueue(pool->release_wq);
 808out_wq:
 809	destroy_workqueue(pool->compact_wq);
 810out_unbuddied:
 811	free_percpu(pool->unbuddied);
 812out_pool:
 813	kmem_cache_destroy(pool->c_handle);
 814out_c:
 815	kfree(pool);
 816out:
 817	return NULL;
 818}
 819
 820/**
 821 * z3fold_destroy_pool() - destroys an existing z3fold pool
 822 * @pool:	the z3fold pool to be destroyed
 823 *
 824 * The pool should be emptied before this function is called.
 825 */
 826static void z3fold_destroy_pool(struct z3fold_pool *pool)
 827{
 828	kmem_cache_destroy(pool->c_handle);
 829
 830	/*
 831	 * We need to destroy pool->compact_wq before pool->release_wq,
 832	 * as any pending work on pool->compact_wq will call
 833	 * queue_work(pool->release_wq, &pool->work).
 834	 *
 835	 * There are still outstanding pages until both workqueues are drained,
 836	 * so we cannot unregister migration until then.
 837	 */
 838
 839	destroy_workqueue(pool->compact_wq);
 840	destroy_workqueue(pool->release_wq);
 841	z3fold_unregister_migration(pool);
 842	kfree(pool);
 843}
 844
 845/**
 846 * z3fold_alloc() - allocates a region of a given size
 847 * @pool:	z3fold pool from which to allocate
 848 * @size:	size in bytes of the desired allocation
 849 * @gfp:	gfp flags used if the pool needs to grow
 850 * @handle:	handle of the new allocation
 851 *
 852 * This function will attempt to find a free region in the pool large enough to
 853 * satisfy the allocation request.  A search of the unbuddied lists is
 854 * performed first. If no suitable free region is found, then a new page is
 855 * allocated and added to the pool to satisfy the request.
 856 *
 857 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 858 * as z3fold pool pages.
 859 *
 860 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 861 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 862 * a new page.
 863 */
 864static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 865			unsigned long *handle)
 866{
 867	int chunks = size_to_chunks(size);
 868	struct z3fold_header *zhdr = NULL;
 869	struct page *page = NULL;
 870	enum buddy bud;
 871	bool can_sleep = gfpflags_allow_blocking(gfp);
 872
 873	if (!size)
 874		return -EINVAL;
 875
 876	if (size > PAGE_SIZE)
 877		return -ENOSPC;
 878
 879	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
 880		bud = HEADLESS;
 881	else {
 882retry:
 883		zhdr = __z3fold_alloc(pool, size, can_sleep);
 884		if (zhdr) {
 885			if (zhdr->first_chunks == 0) {
 886				if (zhdr->middle_chunks != 0 &&
 887				    chunks >= zhdr->start_middle)
 888					bud = LAST;
 889				else
 890					bud = FIRST;
 891			} else if (zhdr->last_chunks == 0)
 892				bud = LAST;
 893			else if (zhdr->middle_chunks == 0)
 894				bud = MIDDLE;
 895			else {
 896				if (kref_put(&zhdr->refcount,
 897					     release_z3fold_page_locked))
 898					atomic64_dec(&pool->pages_nr);
 899				else
 900					z3fold_page_unlock(zhdr);
 901				pr_err("No free chunks in unbuddied\n");
 902				WARN_ON(1);
 903				goto retry;
 904			}
 905			page = virt_to_page(zhdr);
 906			goto found;
 907		}
 908		bud = FIRST;
 909	}
 910
 911	page = NULL;
 912	if (can_sleep) {
 913		spin_lock(&pool->stale_lock);
 914		zhdr = list_first_entry_or_null(&pool->stale,
 915						struct z3fold_header, buddy);
 916		/*
 917		 * Before allocating a page, let's see if we can take one from
 918		 * the stale pages list. cancel_work_sync() can sleep so we
 919		 * limit this case to the contexts where we can sleep
 920		 */
 921		if (zhdr) {
 922			list_del(&zhdr->buddy);
 923			spin_unlock(&pool->stale_lock);
 924			cancel_work_sync(&zhdr->work);
 925			page = virt_to_page(zhdr);
 926		} else {
 927			spin_unlock(&pool->stale_lock);
 928		}
 929	}
 930	if (!page)
 931		page = alloc_page(gfp);
 932
 933	if (!page)
 934		return -ENOMEM;
 935
 936	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
 937	if (!zhdr) {
 938		__free_page(page);
 939		return -ENOMEM;
 940	}
 941	atomic64_inc(&pool->pages_nr);
 942
 943	if (bud == HEADLESS) {
 944		set_bit(PAGE_HEADLESS, &page->private);
 945		goto headless;
 946	}
 947	if (can_sleep) {
 948		lock_page(page);
 949		__SetPageMovable(page, pool->inode->i_mapping);
 950		unlock_page(page);
 951	} else {
 952		if (trylock_page(page)) {
 953			__SetPageMovable(page, pool->inode->i_mapping);
 954			unlock_page(page);
 955		}
 956	}
 957	z3fold_page_lock(zhdr);
 958
 959found:
 960	if (bud == FIRST)
 961		zhdr->first_chunks = chunks;
 962	else if (bud == LAST)
 963		zhdr->last_chunks = chunks;
 964	else {
 965		zhdr->middle_chunks = chunks;
 966		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
 967	}
 968	add_to_unbuddied(pool, zhdr);
 969
 970headless:
 971	spin_lock(&pool->lock);
 972	/* Add/move z3fold page to beginning of LRU */
 973	if (!list_empty(&page->lru))
 974		list_del(&page->lru);
 975
 976	list_add(&page->lru, &pool->lru);
 977
 978	*handle = encode_handle(zhdr, bud);
 979	spin_unlock(&pool->lock);
 980	if (bud != HEADLESS)
 981		z3fold_page_unlock(zhdr);
 982
 983	return 0;
 984}
 985
 986/**
 987 * z3fold_free() - frees the allocation associated with the given handle
 988 * @pool:	pool in which the allocation resided
 989 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 990 *
 991 * In the case that the z3fold page in which the allocation resides is under
 992 * reclaim, as indicated by the PG_reclaim flag being set, this function
 993 * only sets the first|last_chunks to 0.  The page is actually freed
 994 * once both buddies are evicted (see z3fold_reclaim_page() below).
 995 */
 996static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 997{
 998	struct z3fold_header *zhdr;
 999	struct page *page;
1000	enum buddy bud;
1001	bool page_claimed;
1002
1003	zhdr = handle_to_z3fold_header(handle);
1004	page = virt_to_page(zhdr);
1005	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1006
1007	if (test_bit(PAGE_HEADLESS, &page->private)) {
1008		/* if a headless page is under reclaim, just leave.
1009		 * NB: we use test_and_set_bit for a reason: if the bit
1010		 * has not been set before, we release this page
1011		 * immediately so we don't care about its value any more.
1012		 */
1013		if (!page_claimed) {
1014			spin_lock(&pool->lock);
1015			list_del(&page->lru);
1016			spin_unlock(&pool->lock);
1017			free_z3fold_page(page, true);
1018			atomic64_dec(&pool->pages_nr);
1019		}
1020		return;
1021	}
1022
1023	/* Non-headless case */
1024	z3fold_page_lock(zhdr);
1025	bud = handle_to_buddy(handle);
1026
1027	switch (bud) {
1028	case FIRST:
1029		zhdr->first_chunks = 0;
1030		break;
1031	case MIDDLE:
1032		zhdr->middle_chunks = 0;
1033		break;
1034	case LAST:
1035		zhdr->last_chunks = 0;
1036		break;
1037	default:
1038		pr_err("%s: unknown bud %d\n", __func__, bud);
1039		WARN_ON(1);
1040		z3fold_page_unlock(zhdr);
1041		return;
1042	}
1043
1044	free_handle(handle);
1045	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
1046		atomic64_dec(&pool->pages_nr);
1047		return;
1048	}
1049	if (page_claimed) {
1050		/* the page has not been claimed by us */
1051		z3fold_page_unlock(zhdr);
1052		return;
1053	}
1054	if (unlikely(PageIsolated(page)) ||
1055	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1056		z3fold_page_unlock(zhdr);
1057		clear_bit(PAGE_CLAIMED, &page->private);
1058		return;
1059	}
1060	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1061		spin_lock(&pool->lock);
1062		list_del_init(&zhdr->buddy);
1063		spin_unlock(&pool->lock);
1064		zhdr->cpu = -1;
1065		kref_get(&zhdr->refcount);
1066		do_compact_page(zhdr, true);
1067		clear_bit(PAGE_CLAIMED, &page->private);
1068		return;
1069	}
1070	kref_get(&zhdr->refcount);
1071	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1072	clear_bit(PAGE_CLAIMED, &page->private);
1073	z3fold_page_unlock(zhdr);
1074}
1075
1076/**
1077 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1078 * @pool:	pool from which a page will attempt to be evicted
1079 * @retries:	number of pages on the LRU list for which eviction will
1080 *		be attempted before failing
1081 *
1082 * z3fold reclaim is different from normal system reclaim in that it is done
1083 * from the bottom, up. This is because only the bottom layer, z3fold, has
1084 * information on how the allocations are organized within each z3fold page.
1085 * This has the potential to create interesting locking situations between
1086 * z3fold and the user, however.
1087 *
1088 * To avoid these, this is how z3fold_reclaim_page() should be called:
1089 *
1090 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
1091 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1092 * call the user-defined eviction handler with the pool and handle as
1093 * arguments.
1094 *
1095 * If the handle can not be evicted, the eviction handler should return
1096 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
1097 * appropriate list and try the next z3fold page on the LRU up to
1098 * a user defined number of retries.
1099 *
1100 * If the handle is successfully evicted, the eviction handler should
1101 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
1102 * contains logic to delay freeing the page if the page is under reclaim,
1103 * as indicated by the setting of the PG_reclaim flag on the underlying page.
1104 *
1105 * If all buddies in the z3fold page are successfully evicted, then the
1106 * z3fold page can be freed.
1107 *
1108 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
1109 * no pages to evict or an eviction handler is not registered, -EAGAIN if
1110 * the retry limit was hit.
1111 */
1112static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1113{
1114	int i, ret = 0;
1115	struct z3fold_header *zhdr = NULL;
1116	struct page *page = NULL;
1117	struct list_head *pos;
1118	struct z3fold_buddy_slots slots;
1119	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
1120
1121	spin_lock(&pool->lock);
1122	if (!pool->ops || !pool->ops->evict || retries == 0) {
1123		spin_unlock(&pool->lock);
1124		return -EINVAL;
1125	}
1126	for (i = 0; i < retries; i++) {
1127		if (list_empty(&pool->lru)) {
1128			spin_unlock(&pool->lock);
1129			return -EINVAL;
1130		}
1131		list_for_each_prev(pos, &pool->lru) {
1132			page = list_entry(pos, struct page, lru);
1133
1134			/* this bit could have been set by free, in which case
1135			 * we pass over to the next page in the pool.
1136			 */
1137			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
1138				page = NULL;
1139				continue;
1140			}
1141
1142			if (unlikely(PageIsolated(page))) {
1143				clear_bit(PAGE_CLAIMED, &page->private);
1144				page = NULL;
1145				continue;
1146			}
1147			zhdr = page_address(page);
1148			if (test_bit(PAGE_HEADLESS, &page->private))
1149				break;
1150
1151			if (!z3fold_page_trylock(zhdr)) {
1152				clear_bit(PAGE_CLAIMED, &page->private);
1153				zhdr = NULL;
1154				continue; /* can't evict at this point */
1155			}
1156			kref_get(&zhdr->refcount);
1157			list_del_init(&zhdr->buddy);
1158			zhdr->cpu = -1;
1159			break;
1160		}
1161
1162		if (!zhdr)
1163			break;
1164
1165		list_del_init(&page->lru);
1166		spin_unlock(&pool->lock);
1167
1168		if (!test_bit(PAGE_HEADLESS, &page->private)) {
1169			/*
1170			 * We need to encode the handles before unlocking, and
1171			 * use our local slots structure because z3fold_free
1172			 * can zero out zhdr->slots and we can't do much
1173			 * about that
1174			 */
1175			first_handle = 0;
1176			last_handle = 0;
1177			middle_handle = 0;
1178			if (zhdr->first_chunks)
1179				first_handle = __encode_handle(zhdr, &slots,
1180								FIRST);
1181			if (zhdr->middle_chunks)
1182				middle_handle = __encode_handle(zhdr, &slots,
1183								MIDDLE);
1184			if (zhdr->last_chunks)
1185				last_handle = __encode_handle(zhdr, &slots,
1186								LAST);
1187			/*
1188			 * it's safe to unlock here because we hold a
1189			 * reference to this page
1190			 */
1191			z3fold_page_unlock(zhdr);
1192		} else {
1193			first_handle = __encode_handle(zhdr, &slots, HEADLESS);
1194			last_handle = middle_handle = 0;
1195		}
1196
1197		/* Issue the eviction callback(s) */
1198		if (middle_handle) {
1199			ret = pool->ops->evict(pool, middle_handle);
1200			if (ret)
1201				goto next;
1202		}
1203		if (first_handle) {
1204			ret = pool->ops->evict(pool, first_handle);
1205			if (ret)
1206				goto next;
1207		}
1208		if (last_handle) {
1209			ret = pool->ops->evict(pool, last_handle);
1210			if (ret)
1211				goto next;
1212		}
1213next:
1214		if (test_bit(PAGE_HEADLESS, &page->private)) {
1215			if (ret == 0) {
1216				free_z3fold_page(page, true);
1217				atomic64_dec(&pool->pages_nr);
1218				return 0;
1219			}
1220			spin_lock(&pool->lock);
1221			list_add(&page->lru, &pool->lru);
1222			spin_unlock(&pool->lock);
1223			clear_bit(PAGE_CLAIMED, &page->private);
1224		} else {
1225			z3fold_page_lock(zhdr);
1226			if (kref_put(&zhdr->refcount,
1227					release_z3fold_page_locked)) {
1228				atomic64_dec(&pool->pages_nr);
1229				return 0;
1230			}
1231			/*
1232			 * if we are here, the page is still not completely
1233			 * free. Take the global pool lock then to be able
1234			 * to add it back to the lru list
1235			 */
1236			spin_lock(&pool->lock);
1237			list_add(&page->lru, &pool->lru);
1238			spin_unlock(&pool->lock);
1239			z3fold_page_unlock(zhdr);
1240			clear_bit(PAGE_CLAIMED, &page->private);
1241		}
1242
1243		/* We started off locked so we need to lock the pool back */
1244		spin_lock(&pool->lock);
1245	}
1246	spin_unlock(&pool->lock);
1247	return -EAGAIN;
1248}
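
/*
 * Illustrative eviction handler sketch (write_back_somewhere() is a
 * hypothetical helper standing in for e.g. zswap's writeback to the
 * backing device). It returns non-zero on failure so that
 * z3fold_reclaim_page() re-adds the page and retries, and on success it
 * calls z3fold_free() on the handle, as required:
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *data = z3fold_map(pool, handle);
 *		int err = write_back_somewhere(data);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return -EAGAIN;
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 */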
1249
1250/**
1251 * z3fold_map() - maps the allocation associated with the given handle
1252 * @pool:	pool in which the allocation resides
1253 * @handle:	handle associated with the allocation to be mapped
1254 *
1255 * Extracts the buddy number from handle and constructs the pointer to the
1256 * correct starting chunk within the page.
1257 *
1258 * Returns: a pointer to the mapped allocation
1259 */
1260static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1261{
1262	struct z3fold_header *zhdr;
1263	struct page *page;
1264	void *addr;
1265	enum buddy buddy;
1266
1267	zhdr = handle_to_z3fold_header(handle);
1268	addr = zhdr;
1269	page = virt_to_page(zhdr);
1270
1271	if (test_bit(PAGE_HEADLESS, &page->private))
1272		goto out;
1273
1274	z3fold_page_lock(zhdr);
1275	buddy = handle_to_buddy(handle);
1276	switch (buddy) {
1277	case FIRST:
1278		addr += ZHDR_SIZE_ALIGNED;
1279		break;
1280	case MIDDLE:
1281		addr += zhdr->start_middle << CHUNK_SHIFT;
1282		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1283		break;
1284	case LAST:
1285		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1286		break;
1287	default:
1288		pr_err("unknown buddy id %d\n", buddy);
1289		WARN_ON(1);
1290		addr = NULL;
1291		break;
1292	}
1293
1294	if (addr)
1295		zhdr->mapped_count++;
1296	z3fold_page_unlock(zhdr);
1297out:
1298	return addr;
1299}
1300
1301/**
1302 * z3fold_unmap() - unmaps the allocation associated with the given handle
1303 * @pool:	pool in which the allocation resides
1304 * @handle:	handle associated with the allocation to be unmapped
1305 */
1306static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1307{
1308	struct z3fold_header *zhdr;
1309	struct page *page;
1310	enum buddy buddy;
1311
1312	zhdr = handle_to_z3fold_header(handle);
1313	page = virt_to_page(zhdr);
1314
1315	if (test_bit(PAGE_HEADLESS, &page->private))
1316		return;
1317
1318	z3fold_page_lock(zhdr);
1319	buddy = handle_to_buddy(handle);
1320	if (buddy == MIDDLE)
1321		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1322	zhdr->mapped_count--;
1323	z3fold_page_unlock(zhdr);
1324}
1325
1326/**
1327 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1328 * @pool:	pool whose size is being queried
1329 *
1330 * Returns: size in pages of the given pool.
1331 */
1332static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1333{
1334	return atomic64_read(&pool->pages_nr);
1335}
1336
1337static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1338{
1339	struct z3fold_header *zhdr;
1340	struct z3fold_pool *pool;
1341
1342	VM_BUG_ON_PAGE(!PageMovable(page), page);
1343	VM_BUG_ON_PAGE(PageIsolated(page), page);
1344
1345	if (test_bit(PAGE_HEADLESS, &page->private) ||
1346	    test_bit(PAGE_CLAIMED, &page->private))
1347		return false;
1348
1349	zhdr = page_address(page);
1350	z3fold_page_lock(zhdr);
1351	if (test_bit(NEEDS_COMPACTING, &page->private) ||
1352	    test_bit(PAGE_STALE, &page->private))
1353		goto out;
1354
1355	pool = zhdr_to_pool(zhdr);
1356
1357	if (zhdr->mapped_count == 0) {
1358		kref_get(&zhdr->refcount);
1359		if (!list_empty(&zhdr->buddy))
1360			list_del_init(&zhdr->buddy);
1361		spin_lock(&pool->lock);
1362		if (!list_empty(&page->lru))
1363			list_del(&page->lru);
1364		spin_unlock(&pool->lock);
1365		z3fold_page_unlock(zhdr);
1366		return true;
1367	}
1368out:
1369	z3fold_page_unlock(zhdr);
1370	return false;
1371}
1372
1373static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1374			       struct page *page, enum migrate_mode mode)
1375{
1376	struct z3fold_header *zhdr, *new_zhdr;
1377	struct z3fold_pool *pool;
1378	struct address_space *new_mapping;
1379
1380	VM_BUG_ON_PAGE(!PageMovable(page), page);
1381	VM_BUG_ON_PAGE(!PageIsolated(page), page);
1382	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1383
1384	zhdr = page_address(page);
1385	pool = zhdr_to_pool(zhdr);
1386
1387	if (!z3fold_page_trylock(zhdr)) {
1388		return -EAGAIN;
1389	}
1390	if (zhdr->mapped_count != 0) {
1391		z3fold_page_unlock(zhdr);
1392		return -EBUSY;
1393	}
1394	if (work_pending(&zhdr->work)) {
1395		z3fold_page_unlock(zhdr);
1396		return -EAGAIN;
1397	}
1398	new_zhdr = page_address(newpage);
1399	memcpy(new_zhdr, zhdr, PAGE_SIZE);
1400	newpage->private = page->private;
1401	page->private = 0;
1402	z3fold_page_unlock(zhdr);
1403	spin_lock_init(&new_zhdr->page_lock);
1404	INIT_WORK(&new_zhdr->work, compact_page_work);
1405	/*
1406	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1407	 * so we only have to reinitialize it.
1408	 */
1409	INIT_LIST_HEAD(&new_zhdr->buddy);
1410	new_mapping = page_mapping(page);
1411	__ClearPageMovable(page);
1412	ClearPagePrivate(page);
1413
1414	get_page(newpage);
1415	z3fold_page_lock(new_zhdr);
1416	if (new_zhdr->first_chunks)
1417		encode_handle(new_zhdr, FIRST);
1418	if (new_zhdr->last_chunks)
1419		encode_handle(new_zhdr, LAST);
1420	if (new_zhdr->middle_chunks)
1421		encode_handle(new_zhdr, MIDDLE);
1422	set_bit(NEEDS_COMPACTING, &newpage->private);
1423	new_zhdr->cpu = smp_processor_id();
1424	spin_lock(&pool->lock);
1425	list_add(&newpage->lru, &pool->lru);
1426	spin_unlock(&pool->lock);
1427	__SetPageMovable(newpage, new_mapping);
1428	z3fold_page_unlock(new_zhdr);
1429
1430	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1431
1432	page_mapcount_reset(page);
1433	put_page(page);
1434	return 0;
1435}
1436
1437static void z3fold_page_putback(struct page *page)
1438{
1439	struct z3fold_header *zhdr;
1440	struct z3fold_pool *pool;
1441
1442	zhdr = page_address(page);
1443	pool = zhdr_to_pool(zhdr);
1444
1445	z3fold_page_lock(zhdr);
1446	if (!list_empty(&zhdr->buddy))
1447		list_del_init(&zhdr->buddy);
1448	INIT_LIST_HEAD(&page->lru);
1449	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1450		atomic64_dec(&pool->pages_nr);
1451		return;
1452	}
1453	spin_lock(&pool->lock);
1454	list_add(&page->lru, &pool->lru);
1455	spin_unlock(&pool->lock);
1456	z3fold_page_unlock(zhdr);
1457}
1458
1459static const struct address_space_operations z3fold_aops = {
1460	.isolate_page = z3fold_page_isolate,
1461	.migratepage = z3fold_page_migrate,
1462	.putback_page = z3fold_page_putback,
1463};
1464
1465/*****************
1466 * zpool
1467 ****************/
1468
1469static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1470{
1471	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1472		return pool->zpool_ops->evict(pool->zpool, handle);
1473	else
1474		return -ENOENT;
1475}
1476
1477static const struct z3fold_ops z3fold_zpool_ops = {
1478	.evict =	z3fold_zpool_evict
1479};
1480
1481static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1482			       const struct zpool_ops *zpool_ops,
1483			       struct zpool *zpool)
1484{
1485	struct z3fold_pool *pool;
1486
1487	pool = z3fold_create_pool(name, gfp,
1488				zpool_ops ? &z3fold_zpool_ops : NULL);
1489	if (pool) {
1490		pool->zpool = zpool;
1491		pool->zpool_ops = zpool_ops;
1492	}
1493	return pool;
1494}
1495
1496static void z3fold_zpool_destroy(void *pool)
1497{
1498	z3fold_destroy_pool(pool);
1499}
1500
1501static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1502			unsigned long *handle)
1503{
1504	return z3fold_alloc(pool, size, gfp, handle);
1505}
1506static void z3fold_zpool_free(void *pool, unsigned long handle)
1507{
1508	z3fold_free(pool, handle);
1509}
1510
1511static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1512			unsigned int *reclaimed)
1513{
1514	unsigned int total = 0;
1515	int ret = -EINVAL;
1516
1517	while (total < pages) {
1518		ret = z3fold_reclaim_page(pool, 8);
1519		if (ret < 0)
1520			break;
1521		total++;
1522	}
1523
1524	if (reclaimed)
1525		*reclaimed = total;
1526
1527	return ret;
1528}
1529
1530static void *z3fold_zpool_map(void *pool, unsigned long handle,
1531			enum zpool_mapmode mm)
1532{
1533	return z3fold_map(pool, handle);
1534}
1535static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1536{
1537	z3fold_unmap(pool, handle);
1538}
1539
1540static u64 z3fold_zpool_total_size(void *pool)
1541{
1542	return z3fold_get_pool_size(pool) * PAGE_SIZE;
1543}
1544
1545static struct zpool_driver z3fold_zpool_driver = {
1546	.type =		"z3fold",
1547	.owner =	THIS_MODULE,
1548	.create =	z3fold_zpool_create,
1549	.destroy =	z3fold_zpool_destroy,
1550	.malloc =	z3fold_zpool_malloc,
1551	.free =		z3fold_zpool_free,
1552	.shrink =	z3fold_zpool_shrink,
1553	.map =		z3fold_zpool_map,
1554	.unmap =	z3fold_zpool_unmap,
1555	.total_size =	z3fold_zpool_total_size,
1556};
1557
1558MODULE_ALIAS("zpool-z3fold");
1559
1560static int __init init_z3fold(void)
1561{
1562	int ret;
1563
1564	/* Make sure the z3fold header is not larger than the page size */
1565	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1566	ret = z3fold_mount();
1567	if (ret)
1568		return ret;
1569
1570	zpool_register_driver(&z3fold_zpool_driver);
1571
1572	return 0;
1573}
1574
1575static void __exit exit_z3fold(void)
1576{
1577	z3fold_unmount();
1578	zpool_unregister_driver(&z3fold_zpool_driver);
1579}
1580
1581module_init(init_z3fold);
1582module_exit(exit_z3fold);
1583
1584MODULE_LICENSE("GPL");
1585MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1586MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * z3fold.c
   4 *
   5 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
   6 * Copyright (C) 2016, Sony Mobile Communications Inc.
   7 *
   8 * This implementation is based on zbud written by Seth Jennings.
   9 *
   10 * z3fold is a special-purpose allocator for storing compressed pages. It
   11 * can store up to three compressed pages per page, which improves the
   12 * compression ratio of zbud while retaining its main concepts (e.g. always
   13 * storing an integral number of objects per page) and simplicity.
   14 * It still has simple and deterministic reclaim properties that make it
   15 * preferable to a higher density approach (with no requirement on an integral
   16 * number of objects per page) when reclaim is used.
  17 *
  18 * As in zbud, pages are divided into "chunks".  The size of the chunks is
  19 * fixed at compile time and is determined by NCHUNKS_ORDER below.
  20 *
  21 * z3fold doesn't export any API and is meant to be used via zpool API.
  22 */
  23
  24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  25
  26#include <linux/atomic.h>
  27#include <linux/sched.h>
  28#include <linux/cpumask.h>
  29#include <linux/list.h>
  30#include <linux/mm.h>
  31#include <linux/module.h>
  32#include <linux/page-flags.h>
  33#include <linux/migrate.h>
  34#include <linux/node.h>
  35#include <linux/compaction.h>
  36#include <linux/percpu.h>
  37#include <linux/preempt.h>
  38#include <linux/workqueue.h>
  39#include <linux/slab.h>
  40#include <linux/spinlock.h>
  41#include <linux/zpool.h>
  42#include <linux/kmemleak.h>
  43
  44/*
  45 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
  46 * adjusting internal fragmentation.  It also determines the number of
  47 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
  48 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
  49 * in the beginning of an allocated page are occupied by z3fold header, so
   50 * NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
   51 * which is the maximum number of free chunks in a z3fold page; accordingly,
   52 * there will be 63 (or 62, respectively) freelists per pool.
  53 */
  54#define NCHUNKS_ORDER	6
  55
  56#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
  57#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
  58#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
  59#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
  60#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
  61#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)
  62
  63#define BUDDY_MASK	(0x3)
  64#define BUDDY_SHIFT	2
  65#define SLOTS_ALIGN	(0x40)
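
/*
 * Illustrative values for PAGE_SIZE == 4096 with a header that fits into
 * one chunk: CHUNK_SIZE == 64, TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1 and
 * NCHUNKS == TOTAL_CHUNKS - ZHDR_CHUNKS == 63.
 */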
  66
  67/*****************
  68 * Structures
  69*****************/
  70struct z3fold_pool;
  71
  72enum buddy {
  73	HEADLESS = 0,
  74	FIRST,
  75	MIDDLE,
  76	LAST,
  77	BUDDIES_MAX = LAST
  78};
  79
  80struct z3fold_buddy_slots {
  81	/*
  82	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
  83	 * be enough slots to hold all possible variants
  84	 */
  85	unsigned long slot[BUDDY_MASK + 1];
  86	unsigned long pool; /* back link */
  87	rwlock_t lock;
  88};
  89#define HANDLE_FLAG_MASK	(0x03)
  90
  91/*
  92 * struct z3fold_header - z3fold page metadata occupying first chunks of each
  93 *			z3fold page, except for HEADLESS pages
  94 * @buddy:		links the z3fold page into the relevant list in the
  95 *			pool
  96 * @page_lock:		per-page lock
  97 * @refcount:		reference count for the z3fold page
  98 * @work:		work_struct for page layout optimization
  99 * @slots:		pointer to the structure holding buddy slots
 100 * @pool:		pointer to the containing pool
 101 * @cpu:		CPU which this page "belongs" to
 102 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 103 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 104 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 105 * @first_num:		the starting number (for the first handle)
 106 * @mapped_count:	the number of objects currently mapped
 107 */
 108struct z3fold_header {
 109	struct list_head buddy;
 110	spinlock_t page_lock;
 111	struct kref refcount;
 112	struct work_struct work;
 113	struct z3fold_buddy_slots *slots;
 114	struct z3fold_pool *pool;
 115	short cpu;
 116	unsigned short first_chunks;
 117	unsigned short middle_chunks;
 118	unsigned short last_chunks;
 119	unsigned short start_middle;
 120	unsigned short first_num:2;
 121	unsigned short mapped_count:2;
 122	unsigned short foreign_handles:2;
 123};
 124
 125/**
 126 * struct z3fold_pool - stores metadata for each z3fold pool
 127 * @name:	pool name
 128 * @lock:	protects pool unbuddied lists
 129 * @stale_lock:	protects pool stale page list
 130 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 131 *		buddies; the list each z3fold page is added to depends on
 132 *		the size of its free region.
 133 * @stale:	list of pages marked for freeing
 134 * @pages_nr:	number of z3fold pages in the pool.
 135 * @c_handle:	cache for z3fold_buddy_slots allocation
 136 * @compact_wq:	workqueue for page layout background optimization
 137 * @release_wq:	workqueue for safe page release
 138 * @work:	work_struct for safe page release
 139 *
 140 * This structure is allocated at pool creation time and maintains metadata
 141 * pertaining to a particular z3fold pool.
 142 */
 143struct z3fold_pool {
 144	const char *name;
 145	spinlock_t lock;
 146	spinlock_t stale_lock;
 147	struct list_head __percpu *unbuddied;
 148	struct list_head stale;
 149	atomic64_t pages_nr;
 150	struct kmem_cache *c_handle;
 151	struct workqueue_struct *compact_wq;
 152	struct workqueue_struct *release_wq;
 153	struct work_struct work;
 154};
 155
 156/*
 157 * Internal z3fold page flags
 158 */
 159enum z3fold_page_flags {
 160	PAGE_HEADLESS = 0,
 161	MIDDLE_CHUNK_MAPPED,
 162	NEEDS_COMPACTING,
 163	PAGE_STALE,
 164	PAGE_CLAIMED, /* by either reclaim or free */
 165	PAGE_MIGRATED, /* page is migrated and soon to be released */
 166};
 167
 168/*
 169 * handle flags, go under HANDLE_FLAG_MASK
 170 */
 171enum z3fold_handle_flags {
 172	HANDLES_NOFREE = 0,
 173};
 174
 175/*
 176 * Forward declarations
 177 */
 178static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
 179static void compact_page_work(struct work_struct *w);
 180
 181/*****************
 182 * Helpers
 183*****************/
 184
 185/* Converts an allocation size in bytes to size in z3fold chunks */
 186static int size_to_chunks(size_t size)
 187{
 188	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
 189}
 190
 191#define for_each_unbuddied_list(_iter, _begin) \
 192	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
 193
 194static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
 195							gfp_t gfp)
 196{
 197	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
 198							     gfp);
 199
 200	if (slots) {
 201		/* It will be freed separately in free_handle(). */
 202		kmemleak_not_leak(slots);
 203		slots->pool = (unsigned long)pool;
 204		rwlock_init(&slots->lock);
 205	}
 206
 207	return slots;
 208}
 209
 210static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
 211{
 212	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
 213}
 214
 215static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
 216{
 217	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
 218}
 219
 220/* Lock a z3fold page */
 221static inline void z3fold_page_lock(struct z3fold_header *zhdr)
 222{
 223	spin_lock(&zhdr->page_lock);
 224}
 225
 226/* Try to lock a z3fold page */
 227static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
 228{
 229	return spin_trylock(&zhdr->page_lock);
 230}
 231
 232/* Unlock a z3fold page */
 233static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 234{
 235	spin_unlock(&zhdr->page_lock);
 236}
 237
 238/* return locked z3fold page if it's not headless */
 239static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
 240{
 241	struct z3fold_buddy_slots *slots;
 242	struct z3fold_header *zhdr;
 243	int locked = 0;
 244
 245	if (!(handle & (1 << PAGE_HEADLESS))) {
 246		slots = handle_to_slots(handle);
 247		do {
 248			unsigned long addr;
 249
 250			read_lock(&slots->lock);
 251			addr = *(unsigned long *)handle;
 252			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
 253			locked = z3fold_page_trylock(zhdr);
 254			read_unlock(&slots->lock);
 255			if (locked) {
 256				struct page *page = virt_to_page(zhdr);
 257
 258				if (!test_bit(PAGE_MIGRATED, &page->private))
 259					break;
 260				z3fold_page_unlock(zhdr);
 261			}
 262			cpu_relax();
 263		} while (true);
 264	} else {
 265		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
 266	}
 267
 268	return zhdr;
 269}
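/*
 * The loop in get_z3fold_header() guards against a race with page
 * migration: the slot is re-read under slots->lock, the header is
 * taken with a trylock, and if PAGE_MIGRATED is set the lock is
 * dropped and the lookup restarted, so a header is never returned
 * for a page that is about to be released.
 */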
 270
 271static inline void put_z3fold_header(struct z3fold_header *zhdr)
 272{
 273	struct page *page = virt_to_page(zhdr);
 274
 275	if (!test_bit(PAGE_HEADLESS, &page->private))
 276		z3fold_page_unlock(zhdr);
 277}
 278
 279static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
 280{
 281	struct z3fold_buddy_slots *slots;
 282	int i;
 283	bool is_free;
 284
 285	if (WARN_ON(*(unsigned long *)handle == 0))
 286		return;
 287
 288	slots = handle_to_slots(handle);
 289	write_lock(&slots->lock);
 290	*(unsigned long *)handle = 0;
 291
 292	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
 293		write_unlock(&slots->lock);
 294		return; /* simple case, nothing else to do */
 295	}
 296
 297	if (zhdr->slots != slots)
 298		zhdr->foreign_handles--;
 299
 300	is_free = true;
 301	for (i = 0; i <= BUDDY_MASK; i++) {
 302		if (slots->slot[i]) {
 303			is_free = false;
 304			break;
 305		}
 306	}
 307	write_unlock(&slots->lock);
 308
 309	if (is_free) {
 310		struct z3fold_pool *pool = slots_to_pool(slots);
 311
 312		if (zhdr->slots == slots)
 313			zhdr->slots = NULL;
 314		kmem_cache_free(pool->c_handle, slots);
 315	}
 316}
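/*
 * Slot lifecycle (summary): one z3fold_buddy_slots structure is shared
 * by all buddies of a page, so it is only returned to the c_handle
 * cache once every slot[] entry has been zeroed; if HANDLES_NOFREE is
 * set on the slots, freeing is skipped altogether.
 */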
 317
 318/* Initializes the z3fold header of a newly allocated z3fold page */
 319static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
 320					struct z3fold_pool *pool, gfp_t gfp)
 321{
 322	struct z3fold_header *zhdr = page_address(page);
 323	struct z3fold_buddy_slots *slots;
 324
 325	clear_bit(PAGE_HEADLESS, &page->private);
 326	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 327	clear_bit(NEEDS_COMPACTING, &page->private);
 328	clear_bit(PAGE_STALE, &page->private);
 329	clear_bit(PAGE_CLAIMED, &page->private);
 330	clear_bit(PAGE_MIGRATED, &page->private);
 331	if (headless)
 332		return zhdr;
 333
 334	slots = alloc_slots(pool, gfp);
 335	if (!slots)
 336		return NULL;
 337
 338	memset(zhdr, 0, sizeof(*zhdr));
 339	spin_lock_init(&zhdr->page_lock);
 340	kref_init(&zhdr->refcount);
 341	zhdr->cpu = -1;
 342	zhdr->slots = slots;
 343	zhdr->pool = pool;
 344	INIT_LIST_HEAD(&zhdr->buddy);
 345	INIT_WORK(&zhdr->work, compact_page_work);
 346	return zhdr;
 347}
 348
 349/* Resets the struct page fields and frees the page */
 350static void free_z3fold_page(struct page *page, bool headless)
 351{
 352	if (!headless) {
 353		lock_page(page);
 354		__ClearPageMovable(page);
 355		unlock_page(page);
 356	}
 357	__free_page(page);
 358}
 359
 360/* Helper function to build the index */
 361static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
 362{
 363	return (bud + zhdr->first_num) & BUDDY_MASK;
 364}
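/*
 * Illustrative example: with FIRST == 1, LAST == 3 and first_num == 2,
 * __idx(zhdr, FIRST) == (1 + 2) & 3 == 3 and
 * __idx(zhdr, LAST) == (3 + 2) & 3 == 1.  first_num is bumped in
 * z3fold_compact_page() when the middle buddy is promoted to first,
 * which keeps existing handles decoding to the right buddy.
 */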
 365
 366/*
 367 * Encodes the handle of a particular buddy within a z3fold page.
 368 * zhdr->page_lock should be held as this function accesses first_num
 369 * if bud != HEADLESS.
 370 */
 371static unsigned long __encode_handle(struct z3fold_header *zhdr,
 372				struct z3fold_buddy_slots *slots,
 373				enum buddy bud)
 374{
 375	unsigned long h = (unsigned long)zhdr;
 376	int idx = 0;
 377
 378	/*
 379	 * For a headless page, its handle is its pointer with the extra
 380	 * PAGE_HEADLESS bit set
 381	 */
 382	if (bud == HEADLESS)
 383		return h | (1 << PAGE_HEADLESS);
 384
 385	/* otherwise, return pointer to encoded handle */
 386	idx = __idx(zhdr, bud);
 387	h += idx;
 388	if (bud == LAST)
 389		h |= (zhdr->last_chunks << BUDDY_SHIFT);
 390
 391	write_lock(&slots->lock);
 392	slots->slot[idx] = h;
 393	write_unlock(&slots->lock);
 394	return (unsigned long)&slots->slot[idx];
 395}
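/*
 * Slot value layout (a sketch of the encoding above): the page-aligned
 * bits hold the z3fold header address, bits 0..BUDDY_SHIFT-1 hold the
 * rotated buddy index, and for a LAST buddy the object size in chunks
 * is stored from bit BUDDY_SHIFT upwards within the sub-page bits,
 * where handle_to_chunks() reads it back.
 */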
 396
 397static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
 398{
 399	return __encode_handle(zhdr, zhdr->slots, bud);
 400}
 401
 402/* Returns the size in chunks for a LAST bud; zero for other buddies */
 403static unsigned short handle_to_chunks(unsigned long handle)
 404{
 405	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
 406	unsigned long addr;
 407
 408	read_lock(&slots->lock);
 409	addr = *(unsigned long *)handle;
 410	read_unlock(&slots->lock);
 411	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
 412}
 413
 414/*
 415 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 416 *  but that doesn't matter, because the masking will result in the
 417 *  correct buddy number.
 418 */
 419static enum buddy handle_to_buddy(unsigned long handle)
 420{
 421	struct z3fold_header *zhdr;
 422	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
 423	unsigned long addr;
 424
 425	read_lock(&slots->lock);
 426	WARN_ON(handle & (1 << PAGE_HEADLESS));
 427	addr = *(unsigned long *)handle;
 428	read_unlock(&slots->lock);
 429	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
 430	return (addr - zhdr->first_num) & BUDDY_MASK;
 431}
 432
 433static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
 434{
 435	return zhdr->pool;
 436}
 437
 438static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 439{
 440	struct page *page = virt_to_page(zhdr);
 441	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 442
 443	WARN_ON(!list_empty(&zhdr->buddy));
 444	set_bit(PAGE_STALE, &page->private);
 445	clear_bit(NEEDS_COMPACTING, &page->private);
 446	spin_lock(&pool->lock);
 447	spin_unlock(&pool->lock);
 448
 449	if (locked)
 450		z3fold_page_unlock(zhdr);
 451
 452	spin_lock(&pool->stale_lock);
 453	list_add(&zhdr->buddy, &pool->stale);
 454	queue_work(pool->release_wq, &pool->work);
 455	spin_unlock(&pool->stale_lock);
 456
 457	atomic64_dec(&pool->pages_nr);
 458}
 459
 460static void release_z3fold_page_locked(struct kref *ref)
 461{
 462	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
 463						refcount);
 464	WARN_ON(z3fold_page_trylock(zhdr));
 465	__release_z3fold_page(zhdr, true);
 466}
 467
 468static void release_z3fold_page_locked_list(struct kref *ref)
 469{
 470	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
 471					       refcount);
 472	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 473
 474	spin_lock(&pool->lock);
 475	list_del_init(&zhdr->buddy);
 476	spin_unlock(&pool->lock);
 477
 478	WARN_ON(z3fold_page_trylock(zhdr));
 479	__release_z3fold_page(zhdr, true);
 480}
 481
 482static inline int put_z3fold_locked(struct z3fold_header *zhdr)
 483{
 484	return kref_put(&zhdr->refcount, release_z3fold_page_locked);
 485}
 486
 487static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
 488{
 489	return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
 490}
 491
 492static void free_pages_work(struct work_struct *w)
 493{
 494	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
 495
 496	spin_lock(&pool->stale_lock);
 497	while (!list_empty(&pool->stale)) {
 498		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
 499						struct z3fold_header, buddy);
 500		struct page *page = virt_to_page(zhdr);
 501
 502		list_del(&zhdr->buddy);
 503		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
 504			continue;
 505		spin_unlock(&pool->stale_lock);
 506		cancel_work_sync(&zhdr->work);
 507		free_z3fold_page(page, false);
 508		cond_resched();
 509		spin_lock(&pool->stale_lock);
 510	}
 511	spin_unlock(&pool->stale_lock);
 512}
 513
 514/*
 515 * Returns the number of free chunks in a z3fold page.
 516 * NB: can't be used with HEADLESS pages.
 517 */
 518static int num_free_chunks(struct z3fold_header *zhdr)
 519{
 520	int nfree;
 521	/*
 522	 * If there is a middle object, pick up the bigger free space
 523	 * either before or after it. Otherwise just subtract the number
 524	 * of chunks occupied by the first and the last objects.
 525	 */
 526	if (zhdr->middle_chunks != 0) {
 527		int nfree_before = zhdr->first_chunks ?
 528			0 : zhdr->start_middle - ZHDR_CHUNKS;
 529		int nfree_after = zhdr->last_chunks ?
 530			0 : TOTAL_CHUNKS -
 531				(zhdr->start_middle + zhdr->middle_chunks);
 532		nfree = max(nfree_before, nfree_after);
 533	} else
 534		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
 535	return nfree;
 536}
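/*
 * Worked example (illustrative, assuming 4K pages, so TOTAL_CHUNKS ==
 * 64, and ZHDR_CHUNKS == 2): for a page with first_chunks == 0,
 * start_middle == 20, middle_chunks == 10 and last_chunks == 0,
 * nfree_before == 20 - 2 == 18 and nfree_after == 64 - 30 == 34,
 * so num_free_chunks() returns 34.
 */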
 537
 538/* Add to the appropriate unbuddied list */
 539static inline void add_to_unbuddied(struct z3fold_pool *pool,
 540				struct z3fold_header *zhdr)
 541{
 542	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
 543			zhdr->middle_chunks == 0) {
 544		struct list_head *unbuddied;
 545		int freechunks = num_free_chunks(zhdr);
 546
 547		migrate_disable();
 548		unbuddied = this_cpu_ptr(pool->unbuddied);
 549		spin_lock(&pool->lock);
 550		list_add(&zhdr->buddy, &unbuddied[freechunks]);
 551		spin_unlock(&pool->lock);
 552		zhdr->cpu = smp_processor_id();
 553		migrate_enable();
 554	}
 555}
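/*
 * Note: migrate_disable() above only pins the task to its current CPU
 * so that zhdr->cpu matches the per-CPU list the header was put on;
 * the list itself is still protected by pool->lock.
 */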
 556
 557static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
 558{
 559	enum buddy bud = HEADLESS;
 560
 561	if (zhdr->middle_chunks) {
 562		if (!zhdr->first_chunks &&
 563		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
 564			bud = FIRST;
 565		else if (!zhdr->last_chunks)
 566			bud = LAST;
 567	} else {
 568		if (!zhdr->first_chunks)
 569			bud = FIRST;
 570		else if (!zhdr->last_chunks)
 571			bud = LAST;
 572		else
 573			bud = MIDDLE;
 574	}
 575
 576	return bud;
 577}
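/*
 * Example (illustrative): if only the middle buddy is in use, FIRST is
 * chosen when the request fits in front of it (chunks <=
 * start_middle - ZHDR_CHUNKS), LAST is chosen otherwise while the tail
 * is free, and HEADLESS signals that nothing fits.
 */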
 578
 579static inline void *mchunk_memmove(struct z3fold_header *zhdr,
 580				unsigned short dst_chunk)
 581{
 582	void *beg = zhdr;
 583	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
 584		       beg + (zhdr->start_middle << CHUNK_SHIFT),
 585		       zhdr->middle_chunks << CHUNK_SHIFT);
 586}
 587
 588static inline bool buddy_single(struct z3fold_header *zhdr)
 589{
 590	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
 591			(zhdr->first_chunks && zhdr->last_chunks) ||
 592			(zhdr->middle_chunks && zhdr->last_chunks));
 593}
 594
 595static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
 596{
 597	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 598	void *p = zhdr;
 599	unsigned long old_handle = 0;
 600	size_t sz = 0;
 601	struct z3fold_header *new_zhdr = NULL;
 602	int first_idx = __idx(zhdr, FIRST);
 603	int middle_idx = __idx(zhdr, MIDDLE);
 604	int last_idx = __idx(zhdr, LAST);
 605	unsigned short *moved_chunks = NULL;
 606
 607	/*
 608	 * No need to protect slots here -- all the slots are "local" and
 609	 * the page lock is already taken
 610	 */
 611	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
 612		p += ZHDR_SIZE_ALIGNED;
 613		sz = zhdr->first_chunks << CHUNK_SHIFT;
 614		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
 615		moved_chunks = &zhdr->first_chunks;
 616	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
 617		p += zhdr->start_middle << CHUNK_SHIFT;
 618		sz = zhdr->middle_chunks << CHUNK_SHIFT;
 619		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
 620		moved_chunks = &zhdr->middle_chunks;
 621	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
 622		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
 623		sz = zhdr->last_chunks << CHUNK_SHIFT;
 624		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
 625		moved_chunks = &zhdr->last_chunks;
 626	}
 627
 628	if (sz > 0) {
 629		enum buddy new_bud = HEADLESS;
 630		short chunks = size_to_chunks(sz);
 631		void *q;
 632
 633		new_zhdr = __z3fold_alloc(pool, sz, false);
 634		if (!new_zhdr)
 635			return NULL;
 636
 637		if (WARN_ON(new_zhdr == zhdr))
 638			goto out_fail;
 639
 640		new_bud = get_free_buddy(new_zhdr, chunks);
 641		q = new_zhdr;
 642		switch (new_bud) {
 643		case FIRST:
 644			new_zhdr->first_chunks = chunks;
 645			q += ZHDR_SIZE_ALIGNED;
 646			break;
 647		case MIDDLE:
 648			new_zhdr->middle_chunks = chunks;
 649			new_zhdr->start_middle =
 650				new_zhdr->first_chunks + ZHDR_CHUNKS;
 651			q += new_zhdr->start_middle << CHUNK_SHIFT;
 652			break;
 653		case LAST:
 654			new_zhdr->last_chunks = chunks;
 655			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
 656			break;
 657		default:
 658			goto out_fail;
 659		}
 660		new_zhdr->foreign_handles++;
 661		memcpy(q, p, sz);
 662		write_lock(&zhdr->slots->lock);
 663		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
 664			__idx(new_zhdr, new_bud);
 665		if (new_bud == LAST)
 666			*(unsigned long *)old_handle |=
 667					(new_zhdr->last_chunks << BUDDY_SHIFT);
 668		write_unlock(&zhdr->slots->lock);
 669		add_to_unbuddied(pool, new_zhdr);
 670		z3fold_page_unlock(new_zhdr);
 671
 672		*moved_chunks = 0;
 673	}
 674
 675	return new_zhdr;
 676
 677out_fail:
 678	if (new_zhdr && !put_z3fold_locked(new_zhdr)) {
 679		add_to_unbuddied(pool, new_zhdr);
 680		z3fold_page_unlock(new_zhdr);
 681	}
 682	return NULL;
 683
 684}
 685
 686#define BIG_CHUNK_GAP	3
 687/* Has to be called with the page lock held */
 688static int z3fold_compact_page(struct z3fold_header *zhdr)
 689{
 690	struct page *page = virt_to_page(zhdr);
 691
 692	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
 693		return 0; /* can't move middle chunk, it's used */
 694
 695	if (unlikely(PageIsolated(page)))
 696		return 0;
 697
 698	if (zhdr->middle_chunks == 0)
 699		return 0; /* nothing to compact */
 700
 701	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
 702		/* move to the beginning */
 703		mchunk_memmove(zhdr, ZHDR_CHUNKS);
 704		zhdr->first_chunks = zhdr->middle_chunks;
 705		zhdr->middle_chunks = 0;
 706		zhdr->start_middle = 0;
 707		zhdr->first_num++;
 708		return 1;
 709	}
 710
 711	/*
 712	 * moving data is expensive, so let's only do that if
 713	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
 714	 */
 715	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
 716	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
 717			BIG_CHUNK_GAP) {
 718		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
 719		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
 720		return 1;
 721	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
 722		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
 723					+ zhdr->middle_chunks) >=
 724			BIG_CHUNK_GAP) {
 725		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
 726			zhdr->middle_chunks;
 727		mchunk_memmove(zhdr, new_start);
 728		zhdr->start_middle = new_start;
 729		return 1;
 730	}
 731
 732	return 0;
 733}
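/*
 * Worked example (illustrative, assuming ZHDR_CHUNKS == 2): with
 * first_chunks == 5 and start_middle == 12, the gap is
 * 12 - (5 + 2) == 5 >= BIG_CHUNK_GAP, so the middle buddy is moved
 * down to chunk 7; a gap smaller than three chunks is left in place.
 */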
 734
 735static void do_compact_page(struct z3fold_header *zhdr, bool locked)
 736{
 737	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
 738	struct page *page;
 739
 740	page = virt_to_page(zhdr);
 741	if (locked)
 742		WARN_ON(z3fold_page_trylock(zhdr));
 743	else
 744		z3fold_page_lock(zhdr);
 745	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
 746		z3fold_page_unlock(zhdr);
 747		return;
 748	}
 749	spin_lock(&pool->lock);
 750	list_del_init(&zhdr->buddy);
 751	spin_unlock(&pool->lock);
 752
 753	if (put_z3fold_locked(zhdr))
 754		return;
 755
 756	if (test_bit(PAGE_STALE, &page->private) ||
 757	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
 758		z3fold_page_unlock(zhdr);
 759		return;
 760	}
 761
 762	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
 763	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
 764		if (!put_z3fold_locked(zhdr)) {
 765			clear_bit(PAGE_CLAIMED, &page->private);
 766			z3fold_page_unlock(zhdr);
 767		}
 768		return;
 769	}
 770
 771	z3fold_compact_page(zhdr);
 772	add_to_unbuddied(pool, zhdr);
 773	clear_bit(PAGE_CLAIMED, &page->private);
 774	z3fold_page_unlock(zhdr);
 775}
 776
 777static void compact_page_work(struct work_struct *w)
 778{
 779	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
 780						work);
 781
 782	do_compact_page(zhdr, false);
 783}
 784
 785/* returns _locked_ z3fold page header or NULL */
 786static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
 787						size_t size, bool can_sleep)
 788{
 789	struct z3fold_header *zhdr = NULL;
 790	struct page *page;
 791	struct list_head *unbuddied;
 792	int chunks = size_to_chunks(size), i;
 793
 794lookup:
 795	migrate_disable();
 796	/* First, try to find an unbuddied z3fold page. */
 797	unbuddied = this_cpu_ptr(pool->unbuddied);
 798	for_each_unbuddied_list(i, chunks) {
 799		struct list_head *l = &unbuddied[i];
 800
 801		zhdr = list_first_entry_or_null(READ_ONCE(l),
 802					struct z3fold_header, buddy);
 803
 804		if (!zhdr)
 805			continue;
 806
 807		/* Re-check under lock. */
 808		spin_lock(&pool->lock);
 809		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
 810						struct z3fold_header, buddy)) ||
 811		    !z3fold_page_trylock(zhdr)) {
 812			spin_unlock(&pool->lock);
 813			zhdr = NULL;
 814			migrate_enable();
 815			if (can_sleep)
 816				cond_resched();
 817			goto lookup;
 818		}
 819		list_del_init(&zhdr->buddy);
 820		zhdr->cpu = -1;
 821		spin_unlock(&pool->lock);
 822
 823		page = virt_to_page(zhdr);
 824		if (test_bit(NEEDS_COMPACTING, &page->private) ||
 825		    test_bit(PAGE_CLAIMED, &page->private)) {
 826			z3fold_page_unlock(zhdr);
 827			zhdr = NULL;
 828			migrate_enable();
 829			if (can_sleep)
 830				cond_resched();
 831			goto lookup;
 832		}
 833
 834		/*
 835		 * This page could not have been removed from its unbuddied
 836		 * list while the pool lock was held, and we have since taken
 837		 * the page lock, so kref_put() could not have been called
 838		 * before we got here; it is safe to just call kref_get().
 839		 */
 840		kref_get(&zhdr->refcount);
 841		break;
 842	}
 843	migrate_enable();
 844
 845	if (!zhdr) {
 846		int cpu;
 847
 848		/* look for _exact_ match on other cpus' lists */
 849		for_each_online_cpu(cpu) {
 850			struct list_head *l;
 851
 852			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
 853			spin_lock(&pool->lock);
 854			l = &unbuddied[chunks];
 855
 856			zhdr = list_first_entry_or_null(READ_ONCE(l),
 857						struct z3fold_header, buddy);
 858
 859			if (!zhdr || !z3fold_page_trylock(zhdr)) {
 860				spin_unlock(&pool->lock);
 861				zhdr = NULL;
 862				continue;
 863			}
 864			list_del_init(&zhdr->buddy);
 865			zhdr->cpu = -1;
 866			spin_unlock(&pool->lock);
 867
 868			page = virt_to_page(zhdr);
 869			if (test_bit(NEEDS_COMPACTING, &page->private) ||
 870			    test_bit(PAGE_CLAIMED, &page->private)) {
 871				z3fold_page_unlock(zhdr);
 872				zhdr = NULL;
 873				if (can_sleep)
 874					cond_resched();
 875				continue;
 876			}
 877			kref_get(&zhdr->refcount);
 878			break;
 879		}
 880	}
 881
 882	if (zhdr && !zhdr->slots) {
 883		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
 884		if (!zhdr->slots)
 885			goto out_fail;
 886	}
 887	return zhdr;
 888
 889out_fail:
 890	if (!put_z3fold_locked(zhdr)) {
 891		add_to_unbuddied(pool, zhdr);
 892		z3fold_page_unlock(zhdr);
 893	}
 894	return NULL;
 895}
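/*
 * Allocation strategy (summary): the fast path scans this CPU's
 * unbuddied lists first-fit, starting at the requested chunk count;
 * if that fails, other CPUs' lists are probed for an exact-size match
 * only, trading some fragmentation for less cross-CPU contention.
 */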
 896
 897/*
 898 * API Functions
 899 */
 900
 901/**
 902 * z3fold_create_pool() - create a new z3fold pool
 903 * @name:	pool name
 904 * @gfp:	gfp flags when allocating the z3fold pool structure
 905 *
 906 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 907 * failed.
 908 */
 909static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
 910{
 911	struct z3fold_pool *pool = NULL;
 912	int i, cpu;
 913
 914	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
 915	if (!pool)
 916		goto out;
 917	pool->c_handle = kmem_cache_create("z3fold_handle",
 918				sizeof(struct z3fold_buddy_slots),
 919				SLOTS_ALIGN, 0, NULL);
 920	if (!pool->c_handle)
 921		goto out_c;
 922	spin_lock_init(&pool->lock);
 923	spin_lock_init(&pool->stale_lock);
 924	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
 925					 __alignof__(struct list_head));
 926	if (!pool->unbuddied)
 927		goto out_pool;
 928	for_each_possible_cpu(cpu) {
 929		struct list_head *unbuddied =
 930				per_cpu_ptr(pool->unbuddied, cpu);
 931		for_each_unbuddied_list(i, 0)
 932			INIT_LIST_HEAD(&unbuddied[i]);
 933	}
 934	INIT_LIST_HEAD(&pool->stale);
 935	atomic64_set(&pool->pages_nr, 0);
 936	pool->name = name;
 937	pool->compact_wq = create_singlethread_workqueue(pool->name);
 938	if (!pool->compact_wq)
 939		goto out_unbuddied;
 940	pool->release_wq = create_singlethread_workqueue(pool->name);
 941	if (!pool->release_wq)
 942		goto out_wq;
 943	INIT_WORK(&pool->work, free_pages_work);
 944	return pool;
 945
 946out_wq:
 947	destroy_workqueue(pool->compact_wq);
 948out_unbuddied:
 949	free_percpu(pool->unbuddied);
 950out_pool:
 951	kmem_cache_destroy(pool->c_handle);
 952out_c:
 953	kfree(pool);
 954out:
 955	return NULL;
 956}
 957
 958/**
 959 * z3fold_destroy_pool() - destroys an existing z3fold pool
 960 * @pool:	the z3fold pool to be destroyed
 961 *
 962 * The pool should be emptied before this function is called.
 963 */
 964static void z3fold_destroy_pool(struct z3fold_pool *pool)
 965{
 966	kmem_cache_destroy(pool->c_handle);
 967
 968	/*
 969	 * We need to destroy pool->compact_wq before pool->release_wq,
 970	 * as any pending work on pool->compact_wq will call
 971	 * queue_work(pool->release_wq, &pool->work).
 972	 *
 973	 * There are still outstanding pages until both workqueues are drained,
 974	 * so we cannot unregister migration until then.
 975	 */
 976
 977	destroy_workqueue(pool->compact_wq);
 978	destroy_workqueue(pool->release_wq);
 979	free_percpu(pool->unbuddied);
 980	kfree(pool);
 981}
 982
 983static const struct movable_operations z3fold_mops;
 984
 985/**
 986 * z3fold_alloc() - allocates a region of a given size
 987 * @pool:	z3fold pool from which to allocate
 988 * @size:	size in bytes of the desired allocation
 989 * @gfp:	gfp flags used if the pool needs to grow
 990 * @handle:	handle of the new allocation
 991 *
 992 * This function will attempt to find a free region in the pool large enough to
 993 * satisfy the allocation request.  A search of the unbuddied lists is
 994 * performed first. If no suitable free region is found, then a new page is
 995 * allocated and added to the pool to satisfy the request.
 996 *
 997 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 998 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 999 * a new page.
1000 */
1001static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
1002			unsigned long *handle)
1003{
1004	int chunks = size_to_chunks(size);
1005	struct z3fold_header *zhdr = NULL;
1006	struct page *page = NULL;
1007	enum buddy bud;
1008	bool can_sleep = gfpflags_allow_blocking(gfp);
1009
1010	if (!size || (gfp & __GFP_HIGHMEM))
1011		return -EINVAL;
1012
1013	if (size > PAGE_SIZE)
1014		return -ENOSPC;
1015
1016	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
1017		bud = HEADLESS;
1018	else {
1019retry:
1020		zhdr = __z3fold_alloc(pool, size, can_sleep);
1021		if (zhdr) {
1022			bud = get_free_buddy(zhdr, chunks);
1023			if (bud == HEADLESS) {
1024				if (!put_z3fold_locked(zhdr))
1025					z3fold_page_unlock(zhdr);
1026				pr_err("No free chunks in unbuddied\n");
1027				WARN_ON(1);
1028				goto retry;
1029			}
1030			page = virt_to_page(zhdr);
1031			goto found;
1032		}
1033		bud = FIRST;
1034	}
1035
1036	page = alloc_page(gfp);
1037	if (!page)
1038		return -ENOMEM;
1039
1040	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1041	if (!zhdr) {
1042		__free_page(page);
1043		return -ENOMEM;
1044	}
1045	atomic64_inc(&pool->pages_nr);
1046
1047	if (bud == HEADLESS) {
1048		set_bit(PAGE_HEADLESS, &page->private);
1049		goto headless;
1050	}
1051	if (can_sleep) {
1052		lock_page(page);
1053		__SetPageMovable(page, &z3fold_mops);
1054		unlock_page(page);
1055	} else {
1056		WARN_ON(!trylock_page(page));
1057		__SetPageMovable(page, &z3fold_mops);
1058		unlock_page(page);
1059	}
1060	z3fold_page_lock(zhdr);
1061
1062found:
1063	if (bud == FIRST)
1064		zhdr->first_chunks = chunks;
1065	else if (bud == LAST)
1066		zhdr->last_chunks = chunks;
1067	else {
1068		zhdr->middle_chunks = chunks;
1069		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
1070	}
1071	add_to_unbuddied(pool, zhdr);
1072
1073headless:
1074	spin_lock(&pool->lock);
1075	*handle = encode_handle(zhdr, bud);
1076	spin_unlock(&pool->lock);
1077	if (bud != HEADLESS)
1078		z3fold_page_unlock(zhdr);
1079
1080	return 0;
1081}
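/*
 * Typical call sequence (a minimal sketch; in-tree users go through
 * the zpool API below rather than calling these directly):
 *
 *	unsigned long handle;
 *
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		void *obj = z3fold_map(pool, handle);
 *
 *		memcpy(obj, data, len);
 *		z3fold_unmap(pool, handle);
 *		...
 *		z3fold_free(pool, handle);
 *	}
 */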
1082
1083/**
1084 * z3fold_free() - frees the allocation associated with the given handle
1085 * @pool:	pool in which the allocation resided
1086 * @handle:	handle associated with the allocation returned by z3fold_alloc()
1087 *
1088 * In the case that the z3fold page in which the allocation resides is
1089 * claimed (the PAGE_CLAIMED flag is set, e.g. because the page is being
1090 * migrated), this function only sets the first|middle|last_chunks to 0.
1091 * The page itself is actually freed once all of its buddies are gone.
1092 */
1093static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1094{
1095	struct z3fold_header *zhdr;
1096	struct page *page;
1097	enum buddy bud;
1098	bool page_claimed;
1099
1100	zhdr = get_z3fold_header(handle);
1101	page = virt_to_page(zhdr);
1102	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1103
1104	if (test_bit(PAGE_HEADLESS, &page->private)) {
1105		/* if a headless page is under reclaim, just leave.
1106		 * NB: we use test_and_set_bit for a reason: if the bit
1107		 * has not been set before, we release this page
1108		 * immediately so we don't care about its value any more.
1109		 */
1110		if (!page_claimed) {
1111			put_z3fold_header(zhdr);
1112			free_z3fold_page(page, true);
1113			atomic64_dec(&pool->pages_nr);
1114		}
1115		return;
1116	}
1117
1118	/* Non-headless case */
1119	bud = handle_to_buddy(handle);
1120
1121	switch (bud) {
1122	case FIRST:
1123		zhdr->first_chunks = 0;
1124		break;
1125	case MIDDLE:
1126		zhdr->middle_chunks = 0;
1127		break;
1128	case LAST:
1129		zhdr->last_chunks = 0;
1130		break;
1131	default:
1132		pr_err("%s: unknown bud %d\n", __func__, bud);
1133		WARN_ON(1);
1134		put_z3fold_header(zhdr);
1135		return;
1136	}
1137
1138	if (!page_claimed)
1139		free_handle(handle, zhdr);
1140	if (put_z3fold_locked_list(zhdr))
1141		return;
1142	if (page_claimed) {
1143		/* the page has not been claimed by us */
1144		put_z3fold_header(zhdr);
1145		return;
1146	}
1147	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1148		clear_bit(PAGE_CLAIMED, &page->private);
1149		put_z3fold_header(zhdr);
1150		return;
1151	}
1152	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1153		zhdr->cpu = -1;
1154		kref_get(&zhdr->refcount);
1155		clear_bit(PAGE_CLAIMED, &page->private);
1156		do_compact_page(zhdr, true);
1157		return;
1158	}
1159	kref_get(&zhdr->refcount);
1160	clear_bit(PAGE_CLAIMED, &page->private);
1161	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1162	put_z3fold_header(zhdr);
1163}
1164
1165/**
1166 * z3fold_map() - maps the allocation associated with the given handle
1167 * @pool:	pool in which the allocation resides
1168 * @handle:	handle associated with the allocation to be mapped
1169 *
1170 * Extracts the buddy number from handle and constructs the pointer to the
1171 * correct starting chunk within the page.
1172 *
1173 * Returns: a pointer to the mapped allocation
1174 */
1175static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1176{
1177	struct z3fold_header *zhdr;
1178	struct page *page;
1179	void *addr;
1180	enum buddy buddy;
1181
1182	zhdr = get_z3fold_header(handle);
1183	addr = zhdr;
1184	page = virt_to_page(zhdr);
1185
1186	if (test_bit(PAGE_HEADLESS, &page->private))
1187		goto out;
1188
1189	buddy = handle_to_buddy(handle);
1190	switch (buddy) {
1191	case FIRST:
1192		addr += ZHDR_SIZE_ALIGNED;
1193		break;
1194	case MIDDLE:
1195		addr += zhdr->start_middle << CHUNK_SHIFT;
1196		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1197		break;
1198	case LAST:
1199		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1200		break;
1201	default:
1202		pr_err("unknown buddy id %d\n", buddy);
1203		WARN_ON(1);
1204		addr = NULL;
1205		break;
1206	}
1207
1208	if (addr)
1209		zhdr->mapped_count++;
1210out:
1211	put_z3fold_header(zhdr);
1212	return addr;
1213}
1214
1215/**
1216 * z3fold_unmap() - unmaps the allocation associated with the given handle
1217 * @pool:	pool in which the allocation resides
1218 * @handle:	handle associated with the allocation to be unmapped
1219 */
1220static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1221{
1222	struct z3fold_header *zhdr;
1223	struct page *page;
1224	enum buddy buddy;
1225
1226	zhdr = get_z3fold_header(handle);
1227	page = virt_to_page(zhdr);
1228
1229	if (test_bit(PAGE_HEADLESS, &page->private))
1230		return;
1231
1232	buddy = handle_to_buddy(handle);
1233	if (buddy == MIDDLE)
1234		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1235	zhdr->mapped_count--;
1236	put_z3fold_header(zhdr);
1237}
1238
1239/**
1240 * z3fold_get_pool_pages() - gets the z3fold pool size in pages
1241 * @pool:	pool whose size is being queried
1242 *
1243 * Returns: size in pages of the given pool.
1244 */
1245static u64 z3fold_get_pool_pages(struct z3fold_pool *pool)
1246{
1247	return atomic64_read(&pool->pages_nr);
1248}
1249
1250static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1251{
1252	struct z3fold_header *zhdr;
1253	struct z3fold_pool *pool;
1254
1255	VM_BUG_ON_PAGE(PageIsolated(page), page);
1256
1257	if (test_bit(PAGE_HEADLESS, &page->private))
1258		return false;
1259
1260	zhdr = page_address(page);
1261	z3fold_page_lock(zhdr);
1262	if (test_bit(NEEDS_COMPACTING, &page->private) ||
1263	    test_bit(PAGE_STALE, &page->private))
1264		goto out;
1265
1266	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1267		goto out;
1268
1269	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1270		goto out;
1271	pool = zhdr_to_pool(zhdr);
1272	spin_lock(&pool->lock);
1273	if (!list_empty(&zhdr->buddy))
1274		list_del_init(&zhdr->buddy);
1275	spin_unlock(&pool->lock);
1276
1277	kref_get(&zhdr->refcount);
1278	z3fold_page_unlock(zhdr);
1279	return true;
1280
1281out:
1282	z3fold_page_unlock(zhdr);
1283	return false;
1284}
1285
1286static int z3fold_page_migrate(struct page *newpage, struct page *page,
1287		enum migrate_mode mode)
1288{
1289	struct z3fold_header *zhdr, *new_zhdr;
1290	struct z3fold_pool *pool;
1291
1292	VM_BUG_ON_PAGE(!PageIsolated(page), page);
1293	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1294	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1295
1296	zhdr = page_address(page);
1297	pool = zhdr_to_pool(zhdr);
1298
1299	if (!z3fold_page_trylock(zhdr))
1300		return -EAGAIN;
1301	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1302		clear_bit(PAGE_CLAIMED, &page->private);
1303		z3fold_page_unlock(zhdr);
1304		return -EBUSY;
1305	}
1306	if (work_pending(&zhdr->work)) {
1307		z3fold_page_unlock(zhdr);
1308		return -EAGAIN;
1309	}
1310	new_zhdr = page_address(newpage);
1311	memcpy(new_zhdr, zhdr, PAGE_SIZE);
1312	newpage->private = page->private;
1313	set_bit(PAGE_MIGRATED, &page->private);
1314	z3fold_page_unlock(zhdr);
1315	spin_lock_init(&new_zhdr->page_lock);
1316	INIT_WORK(&new_zhdr->work, compact_page_work);
1317	/*
1318	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1319	 * so we only have to reinitialize it.
1320	 */
1321	INIT_LIST_HEAD(&new_zhdr->buddy);
1322	__ClearPageMovable(page);
1323
1324	get_page(newpage);
1325	z3fold_page_lock(new_zhdr);
1326	if (new_zhdr->first_chunks)
1327		encode_handle(new_zhdr, FIRST);
1328	if (new_zhdr->last_chunks)
1329		encode_handle(new_zhdr, LAST);
1330	if (new_zhdr->middle_chunks)
1331		encode_handle(new_zhdr, MIDDLE);
1332	set_bit(NEEDS_COMPACTING, &newpage->private);
1333	new_zhdr->cpu = smp_processor_id();
1334	__SetPageMovable(newpage, &z3fold_mops);
1335	z3fold_page_unlock(new_zhdr);
1336
1337	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1338
1339	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
1340	page->private = 0;
1341	put_page(page);
1342	return 0;
1343}
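/*
 * Migration (summary): the whole source page, header included, is
 * copied into newpage, PAGE_MIGRATED on the old page fences concurrent
 * get_z3fold_header() callers, the per-buddy handles are re-encoded
 * against the new header, and compaction is queued on the new page
 * before the old one is dropped with put_page().
 */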
1344
1345static void z3fold_page_putback(struct page *page)
1346{
1347	struct z3fold_header *zhdr;
1348	struct z3fold_pool *pool;
1349
1350	zhdr = page_address(page);
1351	pool = zhdr_to_pool(zhdr);
1352
1353	z3fold_page_lock(zhdr);
1354	if (!list_empty(&zhdr->buddy))
1355		list_del_init(&zhdr->buddy);
1356	INIT_LIST_HEAD(&page->lru);
1357	if (put_z3fold_locked(zhdr))
1358		return;
1359	if (list_empty(&zhdr->buddy))
1360		add_to_unbuddied(pool, zhdr);
1361	clear_bit(PAGE_CLAIMED, &page->private);
1362	z3fold_page_unlock(zhdr);
1363}
1364
1365static const struct movable_operations z3fold_mops = {
1366	.isolate_page = z3fold_page_isolate,
1367	.migrate_page = z3fold_page_migrate,
1368	.putback_page = z3fold_page_putback,
1369};
1370
1371/*****************
1372 * zpool
1373 ****************/
1374
1375static void *z3fold_zpool_create(const char *name, gfp_t gfp)
1376{
1377	return z3fold_create_pool(name, gfp);
1378}
1379
1380static void z3fold_zpool_destroy(void *pool)
1381{
1382	z3fold_destroy_pool(pool);
1383}
1384
1385static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1386			unsigned long *handle)
1387{
1388	return z3fold_alloc(pool, size, gfp, handle);
1389}
1390static void z3fold_zpool_free(void *pool, unsigned long handle)
1391{
1392	z3fold_free(pool, handle);
1393}
1394
1395static void *z3fold_zpool_map(void *pool, unsigned long handle,
1396			enum zpool_mapmode mm)
1397{
1398	return z3fold_map(pool, handle);
1399}
1400static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1401{
1402	z3fold_unmap(pool, handle);
1403}
1404
1405static u64 z3fold_zpool_total_pages(void *pool)
1406{
1407	return z3fold_get_pool_pages(pool);
1408}
1409
1410static struct zpool_driver z3fold_zpool_driver = {
1411	.type =		"z3fold",
1412	.sleep_mapped = true,
1413	.owner =	THIS_MODULE,
1414	.create =	z3fold_zpool_create,
1415	.destroy =	z3fold_zpool_destroy,
1416	.malloc =	z3fold_zpool_malloc,
1417	.free =		z3fold_zpool_free,
1418	.map =		z3fold_zpool_map,
1419	.unmap =	z3fold_zpool_unmap,
1420	.total_pages =	z3fold_zpool_total_pages,
1421};
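/*
 * Consumer-side sketch (hypothetical example; exact zpool signatures
 * vary across kernel versions):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL);
 *	unsigned long h;
 *
 *	if (zp && zpool_malloc(zp, 100, GFP_KERNEL, &h) == 0) {
 *		void *p = zpool_map_handle(zp, h, ZPOOL_MM_RW);
 *		...
 *		zpool_unmap_handle(zp, h);
 *		zpool_free(zp, h);
 *	}
 *	zpool_destroy_pool(zp);
 */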
1422
1423MODULE_ALIAS("zpool-z3fold");
1424
1425static int __init init_z3fold(void)
1426{
1427	/*
1428	 * Make sure the z3fold header is not larger than the page size and
1429	 * there is remaining space for its buddies.
1430	 */
1431	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
1432	zpool_register_driver(&z3fold_zpool_driver);
1433
1434	return 0;
1435}
1436
1437static void __exit exit_z3fold(void)
1438{
1439	zpool_unregister_driver(&z3fold_zpool_driver);
1440}
1441
1442module_init(init_z3fold);
1443module_exit(exit_z3fold);
1444
1445MODULE_LICENSE("GPL");
1446MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1447MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");