v3.15
  1/*
  2 * zswap.c - zswap driver file
  3 *
  4 * zswap is a backend for frontswap that takes pages that are in the process
  5 * of being swapped out and attempts to compress and store them in a
  6 * RAM-based memory pool.  This can result in a significant I/O reduction on
  7 * the swap device and, in the case where decompressing from RAM is faster
  8 * than reading from the swap device, can also improve workload performance.
  9 *
 10 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 11 *
 12 * This program is free software; you can redistribute it and/or
 13 * modify it under the terms of the GNU General Public License
 14 * as published by the Free Software Foundation; either version 2
 15 * of the License, or (at your option) any later version.
 16 *
 17 * This program is distributed in the hope that it will be useful,
 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 20 * GNU General Public License for more details.
 21*/
 22
 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 24
 25#include <linux/module.h>
 26#include <linux/cpu.h>
 27#include <linux/highmem.h>
 28#include <linux/slab.h>
 29#include <linux/spinlock.h>
 30#include <linux/types.h>
 31#include <linux/atomic.h>
 32#include <linux/frontswap.h>
 33#include <linux/rbtree.h>
 34#include <linux/swap.h>
 35#include <linux/crypto.h>
 36#include <linux/mempool.h>
 37#include <linux/zbud.h>
 38
 39#include <linux/mm_types.h>
 40#include <linux/page-flags.h>
 41#include <linux/swapops.h>
 42#include <linux/writeback.h>
 43#include <linux/pagemap.h>
 44
 45/*********************************
 46* statistics
 47**********************************/
 48/* Number of memory pages used by the compressed pool */
 49static u64 zswap_pool_pages;
 50/* The number of compressed pages currently stored in zswap */
 51static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 52
 53/*
 54 * The statistics below are not protected from concurrent access for
 55 * performance reasons so they may not be 100% accurate.  However,
 56 * they do provide useful information on roughly how many times a
 57 * certain event is occurring.
 58*/
 59
 60/* Pool limit was hit (see zswap_max_pool_percent) */
 61static u64 zswap_pool_limit_hit;
 62/* Pages written back when pool limit was reached */
 63static u64 zswap_written_back_pages;
 64/* Store failed due to a reclaim failure after pool limit was reached */
 65static u64 zswap_reject_reclaim_fail;
 66/* Compressed page was too big for the allocator to (optimally) store */
 67static u64 zswap_reject_compress_poor;
 68/* Store failed because underlying allocator could not get memory */
 69static u64 zswap_reject_alloc_fail;
 70/* Store failed because the entry metadata could not be allocated (rare) */
 71static u64 zswap_reject_kmemcache_fail;
 72/* Duplicate store was encountered (rare) */
 73static u64 zswap_duplicate_entry;
 74
 75/*********************************
 76* tunables
 77**********************************/
 78/* Enable/disable zswap (disabled by default, fixed at boot for now) */
 79static bool zswap_enabled __read_mostly;
 80module_param_named(enabled, zswap_enabled, bool, 0444);
 81
 82/* Compressor to be used by zswap (fixed at boot for now) */
 83#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
 84static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
 85module_param_named(compressor, zswap_compressor, charp, 0444);
 86
 87/* The maximum percentage of memory that the compressed pool can occupy */
 88static unsigned int zswap_max_pool_percent = 20;
 89module_param_named(max_pool_percent,
 90			zswap_max_pool_percent, uint, 0644);
 91
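/*
 * Editor's usage sketch (not part of the original file): "enabled" and
 * "compressor" are 0444 here, i.e. fixed at boot, so a typical v3.15-era
 * configuration is passed on the kernel command line:
 *
 *	zswap.enabled=1 zswap.compressor=lzo zswap.max_pool_percent=20
 *
 * Only max_pool_percent (0644) can be changed later, via
 * /sys/module/zswap/parameters/max_pool_percent.
 */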
 92/* zbud_pool is shared by all of the zswap backend */
 93static struct zbud_pool *zswap_pool;
 94
 95/*********************************
 96* compression functions
 97**********************************/
 98/* per-cpu compression transforms */
 99static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;
100
101enum comp_op {
102	ZSWAP_COMPOP_COMPRESS,
103	ZSWAP_COMPOP_DECOMPRESS
104};
105
106static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
107				u8 *dst, unsigned int *dlen)
108{
109	struct crypto_comp *tfm;
110	int ret;
111
112	tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
113	switch (op) {
114	case ZSWAP_COMPOP_COMPRESS:
115		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
116		break;
117	case ZSWAP_COMPOP_DECOMPRESS:
118		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
119		break;
120	default:
121		ret = -EINVAL;
122	}
123
124	put_cpu();
125	return ret;
126}
127
128static int __init zswap_comp_init(void)
129{
130	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
131		pr_info("%s compressor not available\n", zswap_compressor);
132		/* fall back to default compressor */
133		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
134		if (!crypto_has_comp(zswap_compressor, 0, 0))
135			/* can't even load the default compressor */
136			return -ENODEV;
137	}
138	pr_info("using %s compressor\n", zswap_compressor);
139
140	/* alloc percpu transforms */
141	zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
142	if (!zswap_comp_pcpu_tfms)
143		return -ENOMEM;
144	return 0;
145}
146
147static void zswap_comp_exit(void)
148{
149	/* free percpu transforms */
150	if (zswap_comp_pcpu_tfms)
151		free_percpu(zswap_comp_pcpu_tfms);
152}
153
154/*********************************
155* data structures
156**********************************/
157/*
158 * struct zswap_entry
159 *
160 * This structure contains the metadata for tracking a single compressed
161 * page within zswap.
162 *
163 * rbnode - links the entry into red-black tree for the appropriate swap type
164 * refcount - the number of outstanding references to the entry. This is
165 *            needed to protect against premature freeing of the entry by
166 *            concurrent calls to load, invalidate, and writeback.  The lock
167 *            for the zswap_tree structure that contains the entry must
168 *            be held while changing the refcount.  Since the lock must
169 *            be held, there is no reason to also make refcount atomic.
170 * offset - the swap offset for the entry.  Index into the red-black tree.
171 * handle - zbud allocation handle that stores the compressed page data
172 * length - the length in bytes of the compressed page data.  Needed during
173 *          decompression
174 */
175struct zswap_entry {
176	struct rb_node rbnode;
177	pgoff_t offset;
178	int refcount;
179	unsigned int length;
180	unsigned long handle;
181};
182
183struct zswap_header {
184	swp_entry_t swpentry;
185};
186
187/*
188 * The tree lock in the zswap_tree struct protects a few things:
189 * - the rbtree
190 * - the refcount field of each entry in the tree
191 */
192struct zswap_tree {
193	struct rb_root rbroot;
194	spinlock_t lock;
195};
196
197static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
198
199/*********************************
200* zswap entry functions
201**********************************/
202static struct kmem_cache *zswap_entry_cache;
203
204static int zswap_entry_cache_create(void)
205{
206	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
207	return zswap_entry_cache == NULL;
208}
209
210static void zswap_entry_cache_destroy(void)
211{
212	kmem_cache_destroy(zswap_entry_cache);
213}
214
215static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
216{
217	struct zswap_entry *entry;
218	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
219	if (!entry)
220		return NULL;
221	entry->refcount = 1;
222	RB_CLEAR_NODE(&entry->rbnode);
223	return entry;
224}
225
226static void zswap_entry_cache_free(struct zswap_entry *entry)
227{
228	kmem_cache_free(zswap_entry_cache, entry);
229}
230
231/*********************************
232* rbtree functions
233**********************************/
234static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
235{
236	struct rb_node *node = root->rb_node;
237	struct zswap_entry *entry;
238
239	while (node) {
240		entry = rb_entry(node, struct zswap_entry, rbnode);
241		if (entry->offset > offset)
242			node = node->rb_left;
243		else if (entry->offset < offset)
244			node = node->rb_right;
245		else
246			return entry;
247	}
248	return NULL;
249}
250
251/*
252 * In the case that an entry with the same offset is found, a pointer to
253 * the existing entry is stored in dupentry and the function returns -EEXIST
254 */
255static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
256			struct zswap_entry **dupentry)
257{
258	struct rb_node **link = &root->rb_node, *parent = NULL;
259	struct zswap_entry *myentry;
260
261	while (*link) {
262		parent = *link;
263		myentry = rb_entry(parent, struct zswap_entry, rbnode);
264		if (myentry->offset > entry->offset)
265			link = &(*link)->rb_left;
266		else if (myentry->offset < entry->offset)
267			link = &(*link)->rb_right;
268		else {
269			*dupentry = myentry;
270			return -EEXIST;
271		}
272	}
273	rb_link_node(&entry->rbnode, parent, link);
274	rb_insert_color(&entry->rbnode, root);
275	return 0;
276}
277
278static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
279{
280	if (!RB_EMPTY_NODE(&entry->rbnode)) {
281		rb_erase(&entry->rbnode, root);
282		RB_CLEAR_NODE(&entry->rbnode);
283	}
284}
285
286/*
287 * Carries out the common pattern of freeing an entry's zbud allocation,
288 * freeing the entry itself, and decrementing the number of stored pages.
289 */
290static void zswap_free_entry(struct zswap_entry *entry)
291{
292	zbud_free(zswap_pool, entry->handle);
293	zswap_entry_cache_free(entry);
294	atomic_dec(&zswap_stored_pages);
295	zswap_pool_pages = zbud_get_pool_size(zswap_pool);
296}
297
298/* caller must hold the tree lock */
299static void zswap_entry_get(struct zswap_entry *entry)
300{
301	entry->refcount++;
302}
303
304/* caller must hold the tree lock
305 * remove the entry from the tree and free it, if nobody references it
306 */
307static void zswap_entry_put(struct zswap_tree *tree,
308			struct zswap_entry *entry)
309{
310	int refcount = --entry->refcount;
311
312	BUG_ON(refcount < 0);
313	if (refcount == 0) {
314		zswap_rb_erase(&tree->rbroot, entry);
315		zswap_free_entry(entry);
316	}
317}
318
319/* caller must hold the tree lock */
320static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
321				pgoff_t offset)
322{
323	struct zswap_entry *entry = NULL;
324
325	entry = zswap_rb_search(root, offset);
326	if (entry)
327		zswap_entry_get(entry);
328
329	return entry;
330}
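/*
 * Editor's sketch of the lookup pattern the load and writeback paths below
 * follow (illustrative only, not a function in this file): the tree lock
 * covers the search and the refcount change, and can be dropped while the
 * entry's data is in use, because the taken reference pins the entry.
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);
 *	spin_unlock(&tree->lock);
 *	if (entry) {
 *		... use entry->handle and entry->length ...
 *		spin_lock(&tree->lock);
 *		zswap_entry_put(tree, entry);
 *		spin_unlock(&tree->lock);
 *	}
 */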
331
332/*********************************
333* per-cpu code
334**********************************/
335static DEFINE_PER_CPU(u8 *, zswap_dstmem);
336
337static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
338{
339	struct crypto_comp *tfm;
340	u8 *dst;
341
342	switch (action) {
343	case CPU_UP_PREPARE:
344		tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
345		if (IS_ERR(tfm)) {
346			pr_err("can't allocate compressor transform\n");
347			return NOTIFY_BAD;
348		}
349		*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
350		dst = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
351		if (!dst) {
352			pr_err("can't allocate compressor buffer\n");
353			crypto_free_comp(tfm);
354			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
355			return NOTIFY_BAD;
356		}
357		per_cpu(zswap_dstmem, cpu) = dst;
358		break;
359	case CPU_DEAD:
360	case CPU_UP_CANCELED:
361		tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
362		if (tfm) {
363			crypto_free_comp(tfm);
364			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
365		}
366		dst = per_cpu(zswap_dstmem, cpu);
367		kfree(dst);
368		per_cpu(zswap_dstmem, cpu) = NULL;
369		break;
370	default:
371		break;
372	}
373	return NOTIFY_OK;
374}
375
376static int zswap_cpu_notifier(struct notifier_block *nb,
377				unsigned long action, void *pcpu)
378{
379	unsigned long cpu = (unsigned long)pcpu;
380	return __zswap_cpu_notifier(action, cpu);
381}
382
383static struct notifier_block zswap_cpu_notifier_block = {
384	.notifier_call = zswap_cpu_notifier
385};
386
387static int zswap_cpu_init(void)
388{
389	unsigned long cpu;
390
391	cpu_notifier_register_begin();
392	for_each_online_cpu(cpu)
393		if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
394			goto cleanup;
395	__register_cpu_notifier(&zswap_cpu_notifier_block);
396	cpu_notifier_register_done();
397	return 0;
398
399cleanup:
400	for_each_online_cpu(cpu)
401		__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
402	cpu_notifier_register_done();
403	return -ENOMEM;
404}
405
406/*********************************
407* helpers
408**********************************/
409static bool zswap_is_full(void)
410{
411	return totalram_pages * zswap_max_pool_percent / 100 <
412		zswap_pool_pages;
413}
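/*
 * Editor's worked example (hypothetical numbers): with 1 GiB of RAM
 * (262144 pages of 4 KiB) and the default max_pool_percent of 20, the
 * pool is considered full once zswap_pool_pages exceeds
 * 262144 * 20 / 100 = 52428 pages, i.e. roughly 204 MiB.
 */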
414
415/*********************************
416* writeback code
417**********************************/
418/* return enum for zswap_get_swap_cache_page */
419enum zswap_get_swap_ret {
420	ZSWAP_SWAPCACHE_NEW,
421	ZSWAP_SWAPCACHE_EXIST,
422	ZSWAP_SWAPCACHE_FAIL,
423};
424
425/*
426 * zswap_get_swap_cache_page
427 *
428 * This is an adaptation of read_swap_cache_async()
429 *
430 * This function tries to find a page with the given swap entry
431 * in the swapper_space address space (the swap cache).  If the page
432 * is found, it is returned in retpage.  Otherwise, a page is allocated,
433 * added to the swap cache, and returned in retpage.
434 *
435 * On success, the swap cache page is returned in retpage
436 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
437 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
438 *     the new page is added to swapcache and locked
439 * Returns ZSWAP_SWAPCACHE_FAIL on error
440 */
441static int zswap_get_swap_cache_page(swp_entry_t entry,
442				struct page **retpage)
443{
444	struct page *found_page, *new_page = NULL;
445	struct address_space *swapper_space = swap_address_space(entry);
446	int err;
447
448	*retpage = NULL;
449	do {
450		/*
451		 * First check the swap cache.  Since this is normally
452		 * called after lookup_swap_cache() failed, re-calling
453		 * that would confuse statistics.
454		 */
455		found_page = find_get_page(swapper_space, entry.val);
456		if (found_page)
457			break;
458
459		/*
460		 * Get a new page to read into from swap.
461		 */
462		if (!new_page) {
463			new_page = alloc_page(GFP_KERNEL);
464			if (!new_page)
465				break; /* Out of memory */
466		}
467
468		/*
469		 * call radix_tree_preload() while we can wait.
470		 */
471		err = radix_tree_preload(GFP_KERNEL);
472		if (err)
473			break;
474
475		/*
476		 * Swap entry may have been freed since our caller observed it.
477		 */
478		err = swapcache_prepare(entry);
479		if (err == -EEXIST) { /* seems racy */
480			radix_tree_preload_end();
481			continue;
482		}
483		if (err) { /* swp entry is obsolete ? */
484			radix_tree_preload_end();
485			break;
486		}
487
488		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
489		__set_page_locked(new_page);
490		SetPageSwapBacked(new_page);
491		err = __add_to_swap_cache(new_page, entry);
492		if (likely(!err)) {
493			radix_tree_preload_end();
494			lru_cache_add_anon(new_page);
495			*retpage = new_page;
496			return ZSWAP_SWAPCACHE_NEW;
497		}
498		radix_tree_preload_end();
499		ClearPageSwapBacked(new_page);
500		__clear_page_locked(new_page);
501		/*
502		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
503		 * clear SWAP_HAS_CACHE flag.
504		 */
505		swapcache_free(entry, NULL);
506	} while (err != -ENOMEM);
507
508	if (new_page)
509		page_cache_release(new_page);
510	if (!found_page)
511		return ZSWAP_SWAPCACHE_FAIL;
512	*retpage = found_page;
513	return ZSWAP_SWAPCACHE_EXIST;
514}
515
516/*
517 * Attempts to free an entry by adding a page to the swap cache,
518 * decompressing the entry data into the page, and issuing a
519 * bio write to write the page back to the swap device.
520 *
521 * This can be thought of as a "resumed writeback" of the page
522 * to the swap device.  We are basically resuming the same swap
523 * writeback path that was intercepted with the frontswap_store()
524 * in the first place.  After the page has been decompressed into
525 * the swap cache, the compressed version stored by zswap can be
526 * freed.
527 */
528static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
529{
530	struct zswap_header *zhdr;
531	swp_entry_t swpentry;
532	struct zswap_tree *tree;
533	pgoff_t offset;
534	struct zswap_entry *entry;
535	struct page *page;
536	u8 *src, *dst;
537	unsigned int dlen;
538	int ret;
539	struct writeback_control wbc = {
540		.sync_mode = WB_SYNC_NONE,
541	};
542
543	/* extract swpentry from data */
544	zhdr = zbud_map(pool, handle);
545	swpentry = zhdr->swpentry; /* here */
546	zbud_unmap(pool, handle);
547	tree = zswap_trees[swp_type(swpentry)];
548	offset = swp_offset(swpentry);
549
550	/* find and ref zswap entry */
551	spin_lock(&tree->lock);
552	entry = zswap_entry_find_get(&tree->rbroot, offset);
553	if (!entry) {
554		/* entry was invalidated */
555		spin_unlock(&tree->lock);
556		return 0;
557	}
558	spin_unlock(&tree->lock);
559	BUG_ON(offset != entry->offset);
560
561	/* try to allocate swap cache page */
562	switch (zswap_get_swap_cache_page(swpentry, &page)) {
563	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
564		ret = -ENOMEM;
565		goto fail;
566
567	case ZSWAP_SWAPCACHE_EXIST:
568		/* page is already in the swap cache, ignore for now */
569		page_cache_release(page);
570		ret = -EEXIST;
571		goto fail;
572
573	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
574		/* decompress */
575		dlen = PAGE_SIZE;
576		src = (u8 *)zbud_map(zswap_pool, entry->handle) +
577			sizeof(struct zswap_header);
578		dst = kmap_atomic(page);
579		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
580				entry->length, dst, &dlen);
581		kunmap_atomic(dst);
582		zbud_unmap(zswap_pool, entry->handle);
583		BUG_ON(ret);
584		BUG_ON(dlen != PAGE_SIZE);
585
586		/* page is up to date */
587		SetPageUptodate(page);
588	}
589
590	/* move it to the tail of the inactive list after end_writeback */
591	SetPageReclaim(page);
592
593	/* start writeback */
594	__swap_writepage(page, &wbc, end_swap_bio_write);
595	page_cache_release(page);
596	zswap_written_back_pages++;
597
598	spin_lock(&tree->lock);
599	/* drop local reference */
600	zswap_entry_put(tree, entry);
601
602	/*
603	 * There are two possible situations for the entry here:
604	 * (1) refcount is 1 (the normal case): the entry is valid and on the tree
605	 * (2) refcount is 0: the entry was freed and is not on the tree
606	 *     because an invalidate happened during writeback;
607	 *     search the tree and free the entry only if it is still found
608	*/
609	if (entry == zswap_rb_search(&tree->rbroot, offset))
610		zswap_entry_put(tree, entry);
611	spin_unlock(&tree->lock);
612
613	goto end;
614
615	/*
616	 * if we get here due to ZSWAP_SWAPCACHE_EXIST,
617	 * a load may be happening concurrently;
618	 * it is safe and okay to not free the entry here,
619	 * and if we do free it in the following put,
620	 * it is also okay to return !0
621	*/
622fail:
623	spin_lock(&tree->lock);
624	zswap_entry_put(tree, entry);
625	spin_unlock(&tree->lock);
626
627end:
628	return ret;
629}
630
631/*********************************
632* frontswap hooks
633**********************************/
634/* attempts to compress and store a single page */
635static int zswap_frontswap_store(unsigned type, pgoff_t offset,
636				struct page *page)
637{
638	struct zswap_tree *tree = zswap_trees[type];
639	struct zswap_entry *entry, *dupentry;
640	int ret;
641	unsigned int dlen = PAGE_SIZE, len;
642	unsigned long handle;
643	char *buf;
644	u8 *src, *dst;
645	struct zswap_header *zhdr;
646
647	if (!tree) {
648		ret = -ENODEV;
649		goto reject;
650	}
651
652	/* reclaim space if needed */
653	if (zswap_is_full()) {
654		zswap_pool_limit_hit++;
655		if (zbud_reclaim_page(zswap_pool, 8)) {
656			zswap_reject_reclaim_fail++;
657			ret = -ENOMEM;
658			goto reject;
659		}
660	}
661
662	/* allocate entry */
663	entry = zswap_entry_cache_alloc(GFP_KERNEL);
664	if (!entry) {
665		zswap_reject_kmemcache_fail++;
666		ret = -ENOMEM;
667		goto reject;
668	}
669
670	/* compress */
671	dst = get_cpu_var(zswap_dstmem);
672	src = kmap_atomic(page);
673	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
674	kunmap_atomic(src);
675	if (ret) {
676		ret = -EINVAL;
677		goto freepage;
678	}
679
680	/* store */
681	len = dlen + sizeof(struct zswap_header);
682	ret = zbud_alloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
683		&handle);
684	if (ret == -ENOSPC) {
685		zswap_reject_compress_poor++;
686		goto freepage;
687	}
688	if (ret) {
689		zswap_reject_alloc_fail++;
690		goto freepage;
691	}
692	zhdr = zbud_map(zswap_pool, handle);
693	zhdr->swpentry = swp_entry(type, offset);
694	buf = (u8 *)(zhdr + 1);
695	memcpy(buf, dst, dlen);
696	zbud_unmap(zswap_pool, handle);
697	put_cpu_var(zswap_dstmem);
698
699	/* populate entry */
700	entry->offset = offset;
701	entry->handle = handle;
702	entry->length = dlen;
703
704	/* map */
705	spin_lock(&tree->lock);
706	do {
707		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
708		if (ret == -EEXIST) {
709			zswap_duplicate_entry++;
710			/* remove from rbtree */
711			zswap_rb_erase(&tree->rbroot, dupentry);
712			zswap_entry_put(tree, dupentry);
713		}
714	} while (ret == -EEXIST);
715	spin_unlock(&tree->lock);
716
717	/* update stats */
718	atomic_inc(&zswap_stored_pages);
719	zswap_pool_pages = zbud_get_pool_size(zswap_pool);
720
721	return 0;
722
723freepage:
724	put_cpu_var(zswap_dstmem);
725	zswap_entry_cache_free(entry);
726reject:
727	return ret;
728}
729
730/*
731 * returns 0 if the page was successfully decompressed
732 * returns -1 if the entry was not found or on error
733 */
734static int zswap_frontswap_load(unsigned type, pgoff_t offset,
735				struct page *page)
736{
737	struct zswap_tree *tree = zswap_trees[type];
738	struct zswap_entry *entry;
739	u8 *src, *dst;
740	unsigned int dlen;
741	int ret;
742
743	/* find */
744	spin_lock(&tree->lock);
745	entry = zswap_entry_find_get(&tree->rbroot, offset);
746	if (!entry) {
747		/* entry was written back */
748		spin_unlock(&tree->lock);
749		return -1;
750	}
751	spin_unlock(&tree->lock);
752
753	/* decompress */
754	dlen = PAGE_SIZE;
755	src = (u8 *)zbud_map(zswap_pool, entry->handle) +
756			sizeof(struct zswap_header);
757	dst = kmap_atomic(page);
758	ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
759		dst, &dlen);
760	kunmap_atomic(dst);
761	zbud_unmap(zswap_pool, entry->handle);
762	BUG_ON(ret);
763
764	spin_lock(&tree->lock);
765	zswap_entry_put(tree, entry);
766	spin_unlock(&tree->lock);
767
768	return 0;
769}
770
771/* frees an entry in zswap */
772static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
773{
774	struct zswap_tree *tree = zswap_trees[type];
775	struct zswap_entry *entry;
776
777	/* find */
778	spin_lock(&tree->lock);
779	entry = zswap_rb_search(&tree->rbroot, offset);
780	if (!entry) {
781		/* entry was written back */
782		spin_unlock(&tree->lock);
783		return;
784	}
785
786	/* remove from rbtree */
787	zswap_rb_erase(&tree->rbroot, entry);
788
789	/* drop the initial reference from entry creation */
790	zswap_entry_put(tree, entry);
791
792	spin_unlock(&tree->lock);
793}
794
795/* frees all zswap entries for the given swap type */
796static void zswap_frontswap_invalidate_area(unsigned type)
797{
798	struct zswap_tree *tree = zswap_trees[type];
799	struct zswap_entry *entry, *n;
800
801	if (!tree)
802		return;
803
804	/* walk the tree and free everything */
805	spin_lock(&tree->lock);
806	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
807		zswap_free_entry(entry);
808	tree->rbroot = RB_ROOT;
809	spin_unlock(&tree->lock);
810	kfree(tree);
811	zswap_trees[type] = NULL;
812}
813
814static struct zbud_ops zswap_zbud_ops = {
815	.evict = zswap_writeback_entry
816};
817
818static void zswap_frontswap_init(unsigned type)
819{
820	struct zswap_tree *tree;
821
822	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
823	if (!tree) {
824		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
825		return;
826	}
827
828	tree->rbroot = RB_ROOT;
829	spin_lock_init(&tree->lock);
830	zswap_trees[type] = tree;
831}
832
833static struct frontswap_ops zswap_frontswap_ops = {
834	.store = zswap_frontswap_store,
835	.load = zswap_frontswap_load,
836	.invalidate_page = zswap_frontswap_invalidate_page,
837	.invalidate_area = zswap_frontswap_invalidate_area,
838	.init = zswap_frontswap_init
839};
840
841/*********************************
842* debugfs functions
843**********************************/
844#ifdef CONFIG_DEBUG_FS
845#include <linux/debugfs.h>
846
847static struct dentry *zswap_debugfs_root;
848
849static int __init zswap_debugfs_init(void)
850{
851	if (!debugfs_initialized())
852		return -ENODEV;
853
854	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
855	if (!zswap_debugfs_root)
856		return -ENOMEM;
857
858	debugfs_create_u64("pool_limit_hit", S_IRUGO,
859			zswap_debugfs_root, &zswap_pool_limit_hit);
860	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
861			zswap_debugfs_root, &zswap_reject_reclaim_fail);
862	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
863			zswap_debugfs_root, &zswap_reject_alloc_fail);
864	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
865			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
866	debugfs_create_u64("reject_compress_poor", S_IRUGO,
867			zswap_debugfs_root, &zswap_reject_compress_poor);
868	debugfs_create_u64("written_back_pages", S_IRUGO,
869			zswap_debugfs_root, &zswap_written_back_pages);
870	debugfs_create_u64("duplicate_entry", S_IRUGO,
871			zswap_debugfs_root, &zswap_duplicate_entry);
872	debugfs_create_u64("pool_pages", S_IRUGO,
873			zswap_debugfs_root, &zswap_pool_pages);
874	debugfs_create_atomic_t("stored_pages", S_IRUGO,
875			zswap_debugfs_root, &zswap_stored_pages);
876
877	return 0;
878}
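/*
 * Editor's note (assumes debugfs is mounted at /sys/kernel/debug): the
 * counters registered above can then be read at runtime, e.g.:
 *
 *	# grep . /sys/kernel/debug/zswap/*
 */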
879
880static void __exit zswap_debugfs_exit(void)
881{
882	debugfs_remove_recursive(zswap_debugfs_root);
883}
884#else
885static int __init zswap_debugfs_init(void)
886{
887	return 0;
888}
889
890static void __exit zswap_debugfs_exit(void) { }
891#endif
892
893/*********************************
894* module init and exit
895**********************************/
896static int __init init_zswap(void)
897{
898	if (!zswap_enabled)
899		return 0;
900
901	pr_info("loading zswap\n");
902
903	zswap_pool = zbud_create_pool(GFP_KERNEL, &zswap_zbud_ops);
904	if (!zswap_pool) {
905		pr_err("zbud pool creation failed\n");
906		goto error;
907	}
908
909	if (zswap_entry_cache_create()) {
910		pr_err("entry cache creation failed\n");
911		goto cachefail;
912	}
913	if (zswap_comp_init()) {
914		pr_err("compressor initialization failed\n");
915		goto compfail;
916	}
917	if (zswap_cpu_init()) {
918		pr_err("per-cpu initialization failed\n");
919		goto pcpufail;
920	}
921
922	frontswap_register_ops(&zswap_frontswap_ops);
923	if (zswap_debugfs_init())
924		pr_warn("debugfs initialization failed\n");
925	return 0;
926pcpufail:
927	zswap_comp_exit();
928compfail:
929	zswap_entry_cache_destroy();
930cachefail:
931	zbud_destroy_pool(zswap_pool);
932error:
933	return -ENOMEM;
934}
935/* must be late so crypto has time to come up */
936late_initcall(init_zswap);
937
938MODULE_LICENSE("GPL");
939MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>");
940MODULE_DESCRIPTION("Compressed cache for swap pages");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * zswap.c - zswap driver file
   4 *
   5 * zswap is a cache that takes pages that are in the process
   6 * of being swapped out and attempts to compress and store them in a
   7 * RAM-based memory pool.  This can result in a significant I/O reduction on
   8 * the swap device and, in the case where decompressing from RAM is faster
   9 * than reading from the swap device, can also improve workload performance.
  10 *
  11 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
  12*/
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/cpu.h>
  18#include <linux/highmem.h>
  19#include <linux/slab.h>
  20#include <linux/spinlock.h>
  21#include <linux/types.h>
  22#include <linux/atomic.h>
  23#include <linux/rbtree.h>
  24#include <linux/swap.h>
  25#include <linux/crypto.h>
  26#include <linux/scatterlist.h>
  27#include <linux/mempolicy.h>
  28#include <linux/mempool.h>
  29#include <linux/zpool.h>
  30#include <crypto/acompress.h>
  31#include <linux/zswap.h>
  32#include <linux/mm_types.h>
  33#include <linux/page-flags.h>
  34#include <linux/swapops.h>
  35#include <linux/writeback.h>
  36#include <linux/pagemap.h>
  37#include <linux/workqueue.h>
  38#include <linux/list_lru.h>
  39
  40#include "swap.h"
  41#include "internal.h"
  42
  43/*********************************
  44* statistics
  45**********************************/
  46/* Total bytes used by the compressed storage */
  47u64 zswap_pool_total_size;
  48/* The number of compressed pages currently stored in zswap */
  49atomic_t zswap_stored_pages = ATOMIC_INIT(0);
  50/* The number of same-value filled pages currently stored in zswap */
  51static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
  52
  53/*
  54 * The statistics below are not protected from concurrent access for
  55 * performance reasons so they may not be 100% accurate.  However,
  56 * they do provide useful information on roughly how many times a
  57 * certain event is occurring.
  58*/
  59
  60/* Pool limit was hit (see zswap_max_pool_percent) */
  61static u64 zswap_pool_limit_hit;
  62/* Pages written back when pool limit was reached */
  63static u64 zswap_written_back_pages;
  64/* Store failed due to a reclaim failure after pool limit was reached */
  65static u64 zswap_reject_reclaim_fail;
  66/* Store failed due to compression algorithm failure */
  67static u64 zswap_reject_compress_fail;
  68/* Compressed page was too big for the allocator to (optimally) store */
  69static u64 zswap_reject_compress_poor;
  70/* Store failed because underlying allocator could not get memory */
  71static u64 zswap_reject_alloc_fail;
  72/* Store failed because the entry metadata could not be allocated (rare) */
  73static u64 zswap_reject_kmemcache_fail;
  74
  75/* Shrinker work queue */
  76static struct workqueue_struct *shrink_wq;
  77/* Pool limit was hit, we need to calm down */
  78static bool zswap_pool_reached_full;
  79
  80/*********************************
  81* tunables
  82**********************************/
  83
  84#define ZSWAP_PARAM_UNSET ""
  85
  86static int zswap_setup(void);
  87
  88/* Enable/disable zswap */
  89static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
  90static int zswap_enabled_param_set(const char *,
  91				   const struct kernel_param *);
  92static const struct kernel_param_ops zswap_enabled_param_ops = {
  93	.set =		zswap_enabled_param_set,
  94	.get =		param_get_bool,
  95};
  96module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
  97
  98/* Crypto compressor to use */
  99static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 100static int zswap_compressor_param_set(const char *,
 101				      const struct kernel_param *);
 102static const struct kernel_param_ops zswap_compressor_param_ops = {
 103	.set =		zswap_compressor_param_set,
 104	.get =		param_get_charp,
 105	.free =		param_free_charp,
 106};
 107module_param_cb(compressor, &zswap_compressor_param_ops,
 108		&zswap_compressor, 0644);
 109
 110/* Compressed storage zpool to use */
 111static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 112static int zswap_zpool_param_set(const char *, const struct kernel_param *);
 113static const struct kernel_param_ops zswap_zpool_param_ops = {
 114	.set =		zswap_zpool_param_set,
 115	.get =		param_get_charp,
 116	.free =		param_free_charp,
 117};
 118module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
 119
 120/* The maximum percentage of memory that the compressed pool can occupy */
 121static unsigned int zswap_max_pool_percent = 20;
 122module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
 123
 124/* The threshold for accepting new pages after the max_pool_percent was hit */
 125static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
 126module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
 127		   uint, 0644);
 128
 129/*
 130 * Enable/disable handling same-value filled pages (enabled by default).
 131 * If disabled every page is considered non-same-value filled.
 132 */
 133static bool zswap_same_filled_pages_enabled = true;
 134module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
 135		   bool, 0644);
 136
 137/* Enable/disable handling non-same-value filled pages (enabled by default) */
 138static bool zswap_non_same_filled_pages_enabled = true;
 139module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
 140		   bool, 0644);
 141
 142/* Number of zpools in zswap_pool (empirically determined for scalability) */
 143#define ZSWAP_NR_ZPOOLS 32
 144
 145/* Enable/disable memory pressure-based shrinker. */
 146static bool zswap_shrinker_enabled = IS_ENABLED(
 147		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
 148module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
 149
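/*
 * Editor's usage sketch: unlike the v3.15 listing above, these parameters
 * use 0644 permissions and param callbacks, so they can be changed at
 * runtime through the standard module-parameter sysfs paths (assuming the
 * chosen compressor, e.g. zstd, is available in the running kernel):
 *
 *	# echo zstd > /sys/module/zswap/parameters/compressor
 *	# echo 25 > /sys/module/zswap/parameters/max_pool_percent
 */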
 150bool is_zswap_enabled(void)
 151{
 152	return zswap_enabled;
 153}
 154
 155/*********************************
 156* data structures
 157**********************************/
 158
 159struct crypto_acomp_ctx {
 160	struct crypto_acomp *acomp;
 161	struct acomp_req *req;
 162	struct crypto_wait wait;
 163	u8 *buffer;
 164	struct mutex mutex;
 165	bool is_sleepable;
 166};
 167
 168/*
 169 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 170 * The only case where lru_lock is not acquired while holding tree.lock is
 171 * when a zswap_entry is taken off the lru for writeback, in that case it
 172 * needs to be verified that it's still valid in the tree.
 173 */
 174struct zswap_pool {
 175	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 176	struct crypto_acomp_ctx __percpu *acomp_ctx;
 177	struct percpu_ref ref;
 178	struct list_head list;
 179	struct work_struct release_work;
 180	struct hlist_node node;
 181	char tfm_name[CRYPTO_MAX_ALG_NAME];
 182};
 183
 184/* Global LRU lists shared by all zswap pools. */
 185static struct list_lru zswap_list_lru;
 186/* counter of pages stored in all zswap pools. */
 187static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
 188
 189/* The lock protects zswap_next_shrink updates. */
 190static DEFINE_SPINLOCK(zswap_shrink_lock);
 191static struct mem_cgroup *zswap_next_shrink;
 192static struct work_struct zswap_shrink_work;
 193static struct shrinker *zswap_shrinker;
 194
 195/*
 196 * struct zswap_entry
 197 *
 198 * This structure contains the metadata for tracking a single compressed
 199 * page within zswap.
 200 *
 201 * rbnode - links the entry into red-black tree for the appropriate swap type
 202 * swpentry - associated swap entry, the offset indexes into the red-black tree
 203 * length - the length in bytes of the compressed page data.  Needed during
 204 *          decompression. For a same value filled page length is 0, and both
 205 *          pool and lru are invalid and must be ignored.
 206 * pool - the zswap_pool the entry's data is in
 207 * handle - zpool allocation handle that stores the compressed page data
 208 * value - value of the same-value filled pages which have same content
 209 * objcg - the obj_cgroup that the compressed memory is charged to
 210 * lru - handle to the pool's lru used to evict pages.
 211 */
 212struct zswap_entry {
 213	struct rb_node rbnode;
 214	swp_entry_t swpentry;
 215	unsigned int length;
 216	struct zswap_pool *pool;
 217	union {
 218		unsigned long handle;
 219		unsigned long value;
 220	};
 221	struct obj_cgroup *objcg;
 222	struct list_head lru;
 223};
 224
 225struct zswap_tree {
 226	struct rb_root rbroot;
 227	spinlock_t lock;
 228};
 229
 230static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 231static unsigned int nr_zswap_trees[MAX_SWAPFILES];
 232
 233/* RCU-protected iteration */
 234static LIST_HEAD(zswap_pools);
 235/* protects zswap_pools list modification */
 236static DEFINE_SPINLOCK(zswap_pools_lock);
 237/* pool counter to provide unique names to zpool */
 238static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 239
 240enum zswap_init_type {
 241	ZSWAP_UNINIT,
 242	ZSWAP_INIT_SUCCEED,
 243	ZSWAP_INIT_FAILED
 244};
 245
 246static enum zswap_init_type zswap_init_state;
 247
 248/* used to ensure the integrity of initialization */
 249static DEFINE_MUTEX(zswap_init_lock);
 250
 251/* init completed, but couldn't create the initial pool */
 252static bool zswap_has_pool;
 253
 254/*********************************
 255* helpers and fwd declarations
 256**********************************/
 257
 258static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
 259{
 260	return &zswap_trees[swp_type(swp)][swp_offset(swp)
 261		>> SWAP_ADDRESS_SPACE_SHIFT];
 262}
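/*
 * Editor's note (assuming SWAP_ADDRESS_SPACE_SHIFT == 14, its mainline
 * value): each tree covers 16384 swap slots, i.e. 64 MiB of swap with
 * 4 KiB pages, so one swap device is split across nr_zswap_trees[type]
 * trees to reduce lock contention.
 */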
 263
 264#define zswap_pool_debug(msg, p)				\
 265	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
 266		 zpool_get_type((p)->zpools[0]))
 267
 268static bool zswap_is_full(void)
 269{
 270	return totalram_pages() * zswap_max_pool_percent / 100 <
 271			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 272}
 273
 274static bool zswap_can_accept(void)
 275{
 276	return totalram_pages() * zswap_accept_thr_percent / 100 *
 277				zswap_max_pool_percent / 100 >
 278			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 279}
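/*
 * Editor's worked example (hypothetical numbers): with 1 GiB of RAM
 * (262144 pages of 4 KiB), max_pool_percent = 20 and
 * accept_thr_percent = 90, stores are rejected once the pool passes
 * ~204 MiB and are accepted again only after it shrinks back below
 * 90% of that limit, i.e. ~184 MiB.
 */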
 280
 281static u64 get_zswap_pool_size(struct zswap_pool *pool)
 282{
 283	u64 pool_size = 0;
 284	int i;
 285
 286	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 287		pool_size += zpool_get_total_size(pool->zpools[i]);
 288
 289	return pool_size;
 290}
 291
 292static void zswap_update_total_size(void)
 293{
 294	struct zswap_pool *pool;
 295	u64 total = 0;
 296
 297	rcu_read_lock();
 298
 299	list_for_each_entry_rcu(pool, &zswap_pools, list)
 300		total += get_zswap_pool_size(pool);
 301
 302	rcu_read_unlock();
 303
 304	zswap_pool_total_size = total;
 305}
 306
 307/*********************************
 308* pool functions
 309**********************************/
 310static void __zswap_pool_empty(struct percpu_ref *ref);
 311
 312static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 313{
 314	int i;
 315	struct zswap_pool *pool;
 316	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 317	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 318	int ret;
 319
 320	if (!zswap_has_pool) {
 321		/* if either are unset, pool initialization failed, and we
 322		 * need both params to be set correctly before trying to
 323		 * create a pool.
 324		 */
 325		if (!strcmp(type, ZSWAP_PARAM_UNSET))
 326			return NULL;
 327		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
 328			return NULL;
 329	}
 330
 331	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 332	if (!pool)
 333		return NULL;
 334
 335	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
 336		/* unique name for each pool specifically required by zsmalloc */
 337		snprintf(name, 38, "zswap%x",
 338			 atomic_inc_return(&zswap_pools_count));
 339
 340		pool->zpools[i] = zpool_create_pool(type, name, gfp);
 341		if (!pool->zpools[i]) {
 342			pr_err("%s zpool not available\n", type);
 343			goto error;
 344		}
 345	}
 346	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
 347
 348	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 349
 350	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
 351	if (!pool->acomp_ctx) {
 352		pr_err("percpu alloc failed\n");
 353		goto error;
 354	}
 355
 356	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
 357				       &pool->node);
 358	if (ret)
 359		goto error;
 360
 361	/* being the current pool takes 1 ref; this func expects the
 362	 * caller to always add the new pool as the current pool
 363	 */
 364	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
 365			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
 366	if (ret)
 367		goto ref_fail;
 368	INIT_LIST_HEAD(&pool->list);
 369
 370	zswap_pool_debug("created", pool);
 371
 372	return pool;
 373
 374ref_fail:
 375	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 376error:
 377	if (pool->acomp_ctx)
 378		free_percpu(pool->acomp_ctx);
 379	while (i--)
 380		zpool_destroy_pool(pool->zpools[i]);
 381	kfree(pool);
 382	return NULL;
 383}
 384
 385static struct zswap_pool *__zswap_pool_create_fallback(void)
 386{
 387	bool has_comp, has_zpool;
 388
 389	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
 390	if (!has_comp && strcmp(zswap_compressor,
 391				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
 392		pr_err("compressor %s not available, using default %s\n",
 393		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
 394		param_free_charp(&zswap_compressor);
 395		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 396		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
 397	}
 398	if (!has_comp) {
 399		pr_err("default compressor %s not available\n",
 400		       zswap_compressor);
 401		param_free_charp(&zswap_compressor);
 402		zswap_compressor = ZSWAP_PARAM_UNSET;
 403	}
 404
 405	has_zpool = zpool_has_pool(zswap_zpool_type);
 406	if (!has_zpool && strcmp(zswap_zpool_type,
 407				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
 408		pr_err("zpool %s not available, using default %s\n",
 409		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
 410		param_free_charp(&zswap_zpool_type);
 411		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
 412		has_zpool = zpool_has_pool(zswap_zpool_type);
 413	}
 414	if (!has_zpool) {
 415		pr_err("default zpool %s not available\n",
 416		       zswap_zpool_type);
 417		param_free_charp(&zswap_zpool_type);
 418		zswap_zpool_type = ZSWAP_PARAM_UNSET;
 419	}
 420
 421	if (!has_comp || !has_zpool)
 422		return NULL;
 423
 424	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 425}
 426
 427static void zswap_pool_destroy(struct zswap_pool *pool)
 428{
 429	int i;
 430
 431	zswap_pool_debug("destroying", pool);
 432
 433	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 434	free_percpu(pool->acomp_ctx);
 435
 436	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 437		zpool_destroy_pool(pool->zpools[i]);
 438	kfree(pool);
 439}
 440
 441static void __zswap_pool_release(struct work_struct *work)
 442{
 443	struct zswap_pool *pool = container_of(work, typeof(*pool),
 444						release_work);
 445
 446	synchronize_rcu();
 447
 448	/* nobody should have been able to get a ref... */
 449	WARN_ON(!percpu_ref_is_zero(&pool->ref));
 450	percpu_ref_exit(&pool->ref);
 451
 452	/* pool is now off zswap_pools list and has no references. */
 453	zswap_pool_destroy(pool);
 454}
 455
 456static struct zswap_pool *zswap_pool_current(void);
 457
 458static void __zswap_pool_empty(struct percpu_ref *ref)
 459{
 460	struct zswap_pool *pool;
 461
 462	pool = container_of(ref, typeof(*pool), ref);
 463
 464	spin_lock_bh(&zswap_pools_lock);
 465
 466	WARN_ON(pool == zswap_pool_current());
 467
 468	list_del_rcu(&pool->list);
 469
 470	INIT_WORK(&pool->release_work, __zswap_pool_release);
 471	schedule_work(&pool->release_work);
 472
 473	spin_unlock_bh(&zswap_pools_lock);
 474}
 475
 476static int __must_check zswap_pool_get(struct zswap_pool *pool)
 477{
 478	if (!pool)
 479		return 0;
 480
 481	return percpu_ref_tryget(&pool->ref);
 482}
 483
 484static void zswap_pool_put(struct zswap_pool *pool)
 485{
 486	percpu_ref_put(&pool->ref);
 487}
 488
 489static struct zswap_pool *__zswap_pool_current(void)
 490{
 491	struct zswap_pool *pool;
 492
 493	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
 494	WARN_ONCE(!pool && zswap_has_pool,
 495		  "%s: no page storage pool!\n", __func__);
 496
 497	return pool;
 498}
 499
 500static struct zswap_pool *zswap_pool_current(void)
 501{
 502	assert_spin_locked(&zswap_pools_lock);
 503
 504	return __zswap_pool_current();
 505}
 506
 507static struct zswap_pool *zswap_pool_current_get(void)
 508{
 509	struct zswap_pool *pool;
 510
 511	rcu_read_lock();
 512
 513	pool = __zswap_pool_current();
 514	if (!zswap_pool_get(pool))
 515		pool = NULL;
 516
 517	rcu_read_unlock();
 518
 519	return pool;
 520}
 521
 522/* type and compressor must be null-terminated */
 523static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 524{
 525	struct zswap_pool *pool;
 526
 527	assert_spin_locked(&zswap_pools_lock);
 528
 529	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 530		if (strcmp(pool->tfm_name, compressor))
 531			continue;
 532		/* all zpools share the same type */
 533		if (strcmp(zpool_get_type(pool->zpools[0]), type))
 534			continue;
 535		/* if we can't get it, it's about to be destroyed */
 536		if (!zswap_pool_get(pool))
 537			continue;
 538		return pool;
 539	}
 540
 541	return NULL;
 542}
 543
 544/*********************************
 545* param callbacks
 546**********************************/
 547
 548static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
 549{
 550	/* no change required */
 551	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
 552		return false;
 553	return true;
 554}
 555
 556/* val must be a null-terminated string */
 557static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 558			     char *type, char *compressor)
 559{
 560	struct zswap_pool *pool, *put_pool = NULL;
 561	char *s = strstrip((char *)val);
 562	int ret = 0;
 563	bool new_pool = false;
 564
 565	mutex_lock(&zswap_init_lock);
 566	switch (zswap_init_state) {
 567	case ZSWAP_UNINIT:
 568		/* if this is load-time (pre-init) param setting,
 569		 * don't create a pool; that's done during init.
 570		 */
 571		ret = param_set_charp(s, kp);
 572		break;
 573	case ZSWAP_INIT_SUCCEED:
 574		new_pool = zswap_pool_changed(s, kp);
 575		break;
 576	case ZSWAP_INIT_FAILED:
 577		pr_err("can't set param, initialization failed\n");
 578		ret = -ENODEV;
 579	}
 580	mutex_unlock(&zswap_init_lock);
 581
 582	/* no need to create a new pool, return directly */
 583	if (!new_pool)
 584		return ret;
 585
 586	if (!type) {
 587		if (!zpool_has_pool(s)) {
 588			pr_err("zpool %s not available\n", s);
 589			return -ENOENT;
 590		}
 591		type = s;
 592	} else if (!compressor) {
 593		if (!crypto_has_acomp(s, 0, 0)) {
 594			pr_err("compressor %s not available\n", s);
 595			return -ENOENT;
 596		}
 597		compressor = s;
 598	} else {
 599		WARN_ON(1);
 600		return -EINVAL;
 601	}
 602
 603	spin_lock_bh(&zswap_pools_lock);
 604
 605	pool = zswap_pool_find_get(type, compressor);
 606	if (pool) {
 607		zswap_pool_debug("using existing", pool);
 608		WARN_ON(pool == zswap_pool_current());
 609		list_del_rcu(&pool->list);
 610	}
 611
 612	spin_unlock_bh(&zswap_pools_lock);
 613
 614	if (!pool)
 615		pool = zswap_pool_create(type, compressor);
 616	else {
 617		/*
 618		 * Restore the initial ref dropped by percpu_ref_kill()
 619		 * when the pool was decommissioned and switch it again
 620		 * to percpu mode.
 621		 */
 622		percpu_ref_resurrect(&pool->ref);
 623
 624		/* Drop the ref from zswap_pool_find_get(). */
 625		zswap_pool_put(pool);
 626	}
 627
 628	if (pool)
 629		ret = param_set_charp(s, kp);
 630	else
 631		ret = -EINVAL;
 632
 633	spin_lock_bh(&zswap_pools_lock);
 634
 635	if (!ret) {
 636		put_pool = zswap_pool_current();
 637		list_add_rcu(&pool->list, &zswap_pools);
 638		zswap_has_pool = true;
 639	} else if (pool) {
 640		/* add the possibly pre-existing pool to the end of the pools
 641		 * list; if it's new (and empty) then it'll be removed and
 642		 * destroyed by the put after we drop the lock
 643		 */
 644		list_add_tail_rcu(&pool->list, &zswap_pools);
 645		put_pool = pool;
 646	}
 647
 648	spin_unlock_bh(&zswap_pools_lock);
 649
 650	if (!zswap_has_pool && !pool) {
 651		/* if initial pool creation failed, and this pool creation also
 652		 * failed, maybe both compressor and zpool params were bad.
 653		 * Allow changing this param, so pool creation will succeed
 654		 * when the other param is changed. We already verified this
 655		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
 656		 * checks above.
 657		 */
 658		ret = param_set_charp(s, kp);
 659	}
 660
 661	/* drop the ref from either the old current pool,
 662	 * or the new pool we failed to add
 663	 */
 664	if (put_pool)
 665		percpu_ref_kill(&put_pool->ref);
 666
 667	return ret;
 668}
 669
 670static int zswap_compressor_param_set(const char *val,
 671				      const struct kernel_param *kp)
 672{
 673	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
 674}
 675
 676static int zswap_zpool_param_set(const char *val,
 677				 const struct kernel_param *kp)
 678{
 679	return __zswap_param_set(val, kp, NULL, zswap_compressor);
 680}
 681
 682static int zswap_enabled_param_set(const char *val,
 683				   const struct kernel_param *kp)
 684{
 685	int ret = -ENODEV;
 686
 687	/* if this is load-time (pre-init) param setting, only set param. */
 688	if (system_state != SYSTEM_RUNNING)
 689		return param_set_bool(val, kp);
 690
 691	mutex_lock(&zswap_init_lock);
 692	switch (zswap_init_state) {
 693	case ZSWAP_UNINIT:
 694		if (zswap_setup())
 695			break;
 696		fallthrough;
 697	case ZSWAP_INIT_SUCCEED:
 698		if (!zswap_has_pool)
 699			pr_err("can't enable, no pool configured\n");
 700		else
 701			ret = param_set_bool(val, kp);
 702		break;
 703	case ZSWAP_INIT_FAILED:
 704		pr_err("can't enable, initialization failed\n");
 705	}
 706	mutex_unlock(&zswap_init_lock);
 707
 708	return ret;
 709}
 710
 711/*********************************
 712* lru functions
 713**********************************/
 714
 715/* should be called under RCU */
 716#ifdef CONFIG_MEMCG
 717static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 718{
 719	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
 720}
 721#else
 722static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 723{
 724	return NULL;
 725}
 726#endif
 727
 728static inline int entry_to_nid(struct zswap_entry *entry)
 729{
 730	return page_to_nid(virt_to_page(entry));
 731}
 732
 733static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 734{
 735	atomic_long_t *nr_zswap_protected;
 736	unsigned long lru_size, old, new;
 737	int nid = entry_to_nid(entry);
 738	struct mem_cgroup *memcg;
 739	struct lruvec *lruvec;
 740
 741	/*
 742	 * Note that it is safe to use rcu_read_lock() here, even in the face of
 743	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
 744	 * used in list_lru lookup, only two scenarios are possible:
 745	 *
 746	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
 747	 *    new entry will be reparented to memcg's parent's list_lru.
 748	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
 749	 *    new entry will be added directly to memcg's parent's list_lru.
 750	 *
 751	 * Similar reasoning holds for list_lru_del().
 752	 */
 753	rcu_read_lock();
 754	memcg = mem_cgroup_from_entry(entry);
 755	/* will always succeed */
 756	list_lru_add(list_lru, &entry->lru, nid, memcg);
 757
 758	/* Update the protection area */
 759	lru_size = list_lru_count_one(list_lru, nid, memcg);
 760	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 761	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
 762	old = atomic_long_inc_return(nr_zswap_protected);
 763	/*
 764	 * Decay to avoid overflow and adapt to changing workloads.
 765	 * This is based on LRU reclaim cost decaying heuristics.
 766	 */
 767	do {
 768		new = old > lru_size / 4 ? old / 2 : old;
 769	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
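	/*
	 * Editor's worked example: with lru_size == 1000, nr_zswap_protected
	 * grows by one per add until it exceeds 250 (lru_size / 4), after
	 * which each add halves it, capping it at around lru_size / 4.
	 */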
 770	rcu_read_unlock();
 771}
 772
 773static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
 774{
 775	int nid = entry_to_nid(entry);
 776	struct mem_cgroup *memcg;
 777
 778	rcu_read_lock();
 779	memcg = mem_cgroup_from_entry(entry);
 780	/* will always succeed */
 781	list_lru_del(list_lru, &entry->lru, nid, memcg);
 782	rcu_read_unlock();
 783}
 784
 785void zswap_lruvec_state_init(struct lruvec *lruvec)
 786{
 787	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
 788}
 789
 790void zswap_folio_swapin(struct folio *folio)
 791{
 792	struct lruvec *lruvec;
 793
 794	if (folio) {
 795		lruvec = folio_lruvec(folio);
 796		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 797	}
 798}
 799
 800void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 801{
 802	/* lock out zswap shrinker walking memcg tree */
 803	spin_lock(&zswap_shrink_lock);
 804	if (zswap_next_shrink == memcg)
 805		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
 806	spin_unlock(&zswap_shrink_lock);
 807}
 808
 809/*********************************
 810* rbtree functions
 811**********************************/
 812static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
 813{
 814	struct rb_node *node = root->rb_node;
 815	struct zswap_entry *entry;
 816	pgoff_t entry_offset;
 817
 818	while (node) {
 819		entry = rb_entry(node, struct zswap_entry, rbnode);
 820		entry_offset = swp_offset(entry->swpentry);
 821		if (entry_offset > offset)
 822			node = node->rb_left;
 823		else if (entry_offset < offset)
 824			node = node->rb_right;
 825		else
 826			return entry;
 827	}
 828	return NULL;
 829}
 830
 831/*
  832 * In the case that an entry with the same offset is found, a pointer to
 833 * the existing entry is stored in dupentry and the function returns -EEXIST
 834 */
 835static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 836			struct zswap_entry **dupentry)
 837{
 838	struct rb_node **link = &root->rb_node, *parent = NULL;
 839	struct zswap_entry *myentry;
 840	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
 841
 842	while (*link) {
 843		parent = *link;
 844		myentry = rb_entry(parent, struct zswap_entry, rbnode);
 845		myentry_offset = swp_offset(myentry->swpentry);
 846		if (myentry_offset > entry_offset)
 847			link = &(*link)->rb_left;
 848		else if (myentry_offset < entry_offset)
 849			link = &(*link)->rb_right;
 850		else {
 851			*dupentry = myentry;
 852			return -EEXIST;
 853		}
 854	}
 855	rb_link_node(&entry->rbnode, parent, link);
 856	rb_insert_color(&entry->rbnode, root);
 857	return 0;
 858}
 859
 860static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 861{
 862	rb_erase(&entry->rbnode, root);
 863	RB_CLEAR_NODE(&entry->rbnode);
 864}
 865
 866/*********************************
 867* zswap entry functions
 868**********************************/
 869static struct kmem_cache *zswap_entry_cache;
 870
 871static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
 872{
 873	struct zswap_entry *entry;
 874	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
 875	if (!entry)
 876		return NULL;
 877	RB_CLEAR_NODE(&entry->rbnode);
 878	return entry;
 879}
 880
 881static void zswap_entry_cache_free(struct zswap_entry *entry)
 882{
 883	kmem_cache_free(zswap_entry_cache, entry);
 884}
 885
 886static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
 887{
 888	int i = 0;
 889
 890	if (ZSWAP_NR_ZPOOLS > 1)
 891		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
 892
 893	return entry->pool->zpools[i];
 894}
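    /*
     * Hashing the entry pointer spreads entries across the pool's
     * ZSWAP_NR_ZPOOLS zpools, presumably to reduce contention on any single
     * zpool; an entry always hashes to the same zpool for its lifetime.
     */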
 895
 896/*
 897 * Carries out the common pattern of freeing an entry's zpool allocation,
 898 * freeing the entry itself, and decrementing the number of stored pages.
 899 */
 900static void zswap_entry_free(struct zswap_entry *entry)
 901{
 902	if (!entry->length)
 903		atomic_dec(&zswap_same_filled_pages);
 904	else {
 905		zswap_lru_del(&zswap_list_lru, entry);
 906		zpool_free(zswap_find_zpool(entry), entry->handle);
 907		atomic_dec(&zswap_nr_stored);
 908		zswap_pool_put(entry->pool);
 909	}
 910	if (entry->objcg) {
 911		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
 912		obj_cgroup_put(entry->objcg);
 913	}
 914	zswap_entry_cache_free(entry);
 915	atomic_dec(&zswap_stored_pages);
 916	zswap_update_total_size();
 917}
 918
 919/*
 920 * The caller holds the tree lock and has looked the entry up in the
 921 * tree, so it must be on the tree; remove it from the tree and free it.
 922 */
 923static void zswap_invalidate_entry(struct zswap_tree *tree,
 924				   struct zswap_entry *entry)
 925{
 926	zswap_rb_erase(&tree->rbroot, entry);
 927	zswap_entry_free(entry);
 928}
 929
 930/*********************************
 931* compressed storage functions
 932**********************************/
 933static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 934{
 935	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 936	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 937	struct crypto_acomp *acomp;
 938	struct acomp_req *req;
 939	int ret;
 940
 941	mutex_init(&acomp_ctx->mutex);
 942
 943	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
 944	if (!acomp_ctx->buffer)
 945		return -ENOMEM;
 946
 947	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
 948	if (IS_ERR(acomp)) {
 949		pr_err("could not alloc crypto acomp %s : %ld\n",
 950				pool->tfm_name, PTR_ERR(acomp));
 951		ret = PTR_ERR(acomp);
 952		goto acomp_fail;
 953	}
 954	acomp_ctx->acomp = acomp;
 955	acomp_ctx->is_sleepable = acomp_is_async(acomp);
 956
 957	req = acomp_request_alloc(acomp_ctx->acomp);
 958	if (!req) {
 959		pr_err("could not alloc crypto acomp_request %s\n",
 960		       pool->tfm_name);
 961		ret = -ENOMEM;
 962		goto req_fail;
 963	}
 964	acomp_ctx->req = req;
 965
 966	crypto_init_wait(&acomp_ctx->wait);
 967	/*
 968	 * If the acomp backend is an async zip driver, crypto_req_done() will
 969	 * wake up crypto_wait_req(); if the backend is scomp, the callback
 970	 * won't be called and crypto_wait_req() returns without blocking.
 971	 */
 972	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 973				   crypto_req_done, &acomp_ctx->wait);
 974
 975	return 0;
 976
 977req_fail:
 978	crypto_free_acomp(acomp_ctx->acomp);
 979acomp_fail:
 980	kfree(acomp_ctx->buffer);
 981	return ret;
 982}
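    /*
     * This prepare callback and the dead callback below are registered via
     * cpuhp_setup_state_multi() in zswap_setup(), so each pool's per-CPU
     * acomp context is set up when a CPU comes online and torn down when
     * the CPU goes away.
     */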
 983
 984static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 985{
 986	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 987	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 988
 989	if (!IS_ERR_OR_NULL(acomp_ctx)) {
 990		if (!IS_ERR_OR_NULL(acomp_ctx->req))
 991			acomp_request_free(acomp_ctx->req);
 992		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
 993			crypto_free_acomp(acomp_ctx->acomp);
 994		kfree(acomp_ctx->buffer);
 995	}
 996
 997	return 0;
 998}
 999
1000static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
1001{
1002	struct crypto_acomp_ctx *acomp_ctx;
1003	struct scatterlist input, output;
1004	int comp_ret = 0, alloc_ret = 0;
1005	unsigned int dlen = PAGE_SIZE;
1006	unsigned long handle;
1007	struct zpool *zpool;
1008	char *buf;
1009	gfp_t gfp;
1010	u8 *dst;
1011
1012	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1013
1014	mutex_lock(&acomp_ctx->mutex);
1015
1016	dst = acomp_ctx->buffer;
1017	sg_init_table(&input, 1);
1018	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1019
1020	/*
1021	 * We need PAGE_SIZE * 2 here since compression may expand the data,
1022	 * and hardware accelerators may not check the dst buffer size, so
1023	 * give the dst buffer enough length to avoid a buffer overflow.
1024	 */
1025	sg_init_one(&output, dst, PAGE_SIZE * 2);
1026	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1027
1028	/*
1029	 * It may look a little silly that we send an asynchronous request and
1030	 * then wait for its completion synchronously; this makes the process
1031	 * synchronous in fact.
1032	 * Theoretically, acomp supports users sending multiple requests through
1033	 * one acomp instance and having them completed concurrently. But zswap
1034	 * actually stores and loads page by page, and within one thread doing
1035	 * zswap there is no way to send the second page before the first page
1036	 * is done.
1037	 * Different threads running on different CPUs have different acomp
1038	 * instances, though, so multiple threads can (de)compress in parallel.
1039	 */
1040	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1041	dlen = acomp_ctx->req->dlen;
1042	if (comp_ret)
1043		goto unlock;
1044
1045	zpool = zswap_find_zpool(entry);
1046	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1047	if (zpool_malloc_support_movable(zpool))
1048		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1049	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
1050	if (alloc_ret)
1051		goto unlock;
1052
1053	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1054	memcpy(buf, dst, dlen);
1055	zpool_unmap_handle(zpool, handle);
1056
1057	entry->handle = handle;
1058	entry->length = dlen;
1059
1060unlock:
1061	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
1062		zswap_reject_compress_poor++;
1063	else if (comp_ret)
1064		zswap_reject_compress_fail++;
1065	else if (alloc_ret)
1066		zswap_reject_alloc_fail++;
1067
1068	mutex_unlock(&acomp_ctx->mutex);
1069	return comp_ret == 0 && alloc_ret == 0;
1070}
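    /*
     * Illustrative outcome: a 4096-byte page that compresses to 1200 bytes
     * gets a 1200-byte zpool allocation. A page whose compressed form would
     * not fit makes the compressor return -ENOSPC, which is counted as
     * reject_compress_poor above.
     */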
1071
1072static void zswap_decompress(struct zswap_entry *entry, struct page *page)
1073{
1074	struct zpool *zpool = zswap_find_zpool(entry);
1075	struct scatterlist input, output;
1076	struct crypto_acomp_ctx *acomp_ctx;
1077	u8 *src;
1078
1079	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1080	mutex_lock(&acomp_ctx->mutex);
1081
1082	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1083	/*
1084	 * If zpool_map_handle() is atomic, we cannot reliably utilize its mapped buffer
1085	 * to do crypto_acomp_decompress(), which might sleep. In such cases, we must
1086	 * resort to copying the buffer to a temporary one.
1087	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
1088	 * such as a kmap address of high memory or even a vmap address.
1089	 * However, sg_init_one() is only equipped to handle linearly mapped low memory.
1090	 * In such cases, we also must copy the buffer to a temporary, lowmem one.
1091	 */
1092	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
1093	    !virt_addr_valid(src)) {
1094		memcpy(acomp_ctx->buffer, src, entry->length);
1095		src = acomp_ctx->buffer;
1096		zpool_unmap_handle(zpool, entry->handle);
1097	}
1098
1099	sg_init_one(&input, src, entry->length);
1100	sg_init_table(&output, 1);
1101	sg_set_page(&output, page, PAGE_SIZE, 0);
1102	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1103	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1104	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1105	mutex_unlock(&acomp_ctx->mutex);
1106
1107	if (src != acomp_ctx->buffer)
1108		zpool_unmap_handle(zpool, entry->handle);
1109}
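    /*
     * Note the two unmap paths above: when the compressed data had to be
     * copied into acomp_ctx->buffer, the handle was already unmapped before
     * decompression; otherwise it stays mapped across the call and is only
     * unmapped here at the end.
     */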
1110
1111/*********************************
1112* writeback code
1113**********************************/
1114/*
1115 * Attempts to free an entry by adding a folio to the swap cache,
1116 * decompressing the entry data into the folio, and issuing a
1117 * bio write to write the folio back to the swap device.
1118 *
1119 * This can be thought of as a "resumed writeback" of the folio
1120 * to the swap device.  We are basically resuming the same swap
1121 * writeback path that was intercepted by zswap_store()
1122 * in the first place.  After the folio has been decompressed into
1123 * the swap cache, the compressed version stored by zswap can be
1124 * freed.
1125 */
1126static int zswap_writeback_entry(struct zswap_entry *entry,
1127				 swp_entry_t swpentry)
1128{
1129	struct zswap_tree *tree;
1130	struct folio *folio;
1131	struct mempolicy *mpol;
1132	bool folio_was_allocated;
1133	struct writeback_control wbc = {
1134		.sync_mode = WB_SYNC_NONE,
1135	};
1136
1137	/* try to allocate swap cache folio */
1138	mpol = get_task_policy(current);
1139	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1140				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1141	if (!folio)
1142		return -ENOMEM;
1143
1144	/*
1145	 * Found an existing folio: we raced with swapin or a concurrent
1146	 * shrinker. We generally write back cold folios from zswap, and
1147	 * swapin means the folio just became hot, so skip this folio.
1148	 * In the unlikely concurrent shrinker case, the folio will be
1149	 * unlinked and freed when invalidated by the shrinker anyway.
1150	 */
1151	if (!folio_was_allocated) {
1152		folio_put(folio);
1153		return -EEXIST;
1154	}
1155
1156	/*
1157	 * folio is locked, and the swapcache is now secured against
1158	 * concurrent swapping to and from the slot, and against concurrent
1159	 * swapoff, so we can safely dereference the zswap tree here.
1160	 * Verify that the swap entry hasn't been invalidated and recycled
1161	 * behind our backs, to avoid overwriting a new swap folio with
1162	 * old compressed data. Only when this is successful can the entry
1163	 * be dereferenced.
1164	 */
1165	tree = swap_zswap_tree(swpentry);
1166	spin_lock(&tree->lock);
1167	if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
1168		spin_unlock(&tree->lock);
1169		delete_from_swap_cache(folio);
1170		folio_unlock(folio);
1171		folio_put(folio);
1172		return -ENOMEM;
1173	}
1174
1175	/* Safe to deref entry after the entry is verified above. */
1176	zswap_rb_erase(&tree->rbroot, entry);
1177	spin_unlock(&tree->lock);
1178
1179	zswap_decompress(entry, &folio->page);
1180
1181	count_vm_event(ZSWPWB);
1182	if (entry->objcg)
1183		count_objcg_event(entry->objcg, ZSWPWB);
1184
1185	zswap_entry_free(entry);
1186
1187	/* folio is up to date */
1188	folio_mark_uptodate(folio);
1189
1190	/* move it to the tail of the inactive list after end_writeback */
1191	folio_set_reclaim(folio);
1192
1193	/* start writeback */
1194	__swap_writepage(folio, &wbc);
1195	folio_put(folio);
1196
1197	return 0;
1198}
1199
1200/*********************************
1201* shrinker functions
1202**********************************/
1203static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1204				       spinlock_t *lock, void *arg)
1205{
1206	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1207	bool *encountered_page_in_swapcache = (bool *)arg;
1208	swp_entry_t swpentry;
1209	enum lru_status ret = LRU_REMOVED_RETRY;
1210	int writeback_result;
1211
1212	/*
1213	 * As soon as we drop the LRU lock, the entry can be freed by
1214	 * a concurrent invalidation. This means the following:
1215	 *
1216	 * 1. We extract the swp_entry_t to the stack, allowing
1217	 *    zswap_writeback_entry() to pin the swap entry and
1218	 *    then validate the zswap entry against that swap entry's
1219	 *    tree using pointer value comparison. Only when that
1220	 *    is successful can the entry be dereferenced.
1221	 *
1222	 * 2. Usually, objects are taken off the LRU for reclaim. In
1223	 *    this case this isn't possible, because if reclaim fails
1224	 *    for whatever reason, we have no means of knowing if the
1225	 *    entry is alive to put it back on the LRU.
1226	 *
1227	 *    So rotate it before dropping the lock. If the entry is
1228	 *    written back or invalidated, the free path will unlink
1229	 *    it. For failures, rotation is the right thing as well.
1230	 *
1231	 *    Temporary failures, where the same entry should be tried
1232	 *    again immediately, almost never happen for this shrinker.
1233	 *    We don't do any trylocking; -ENOMEM comes closest,
1234	 *    but that's extremely rare and doesn't happen spuriously
1235	 *    either. Don't bother distinguishing this case.
1236	 */
1237	list_move_tail(item, &l->list);
1238
1239	/*
1240	 * Once the lru lock is dropped, the entry might get freed. The
1241	 * swpentry is copied to the stack, and entry isn't deref'd again
1242	 * until the entry is verified to still be alive in the tree.
1243	 */
1244	swpentry = entry->swpentry;
1245
1246	/*
1247	 * It's safe to drop the lock here because we return either
1248	 * LRU_REMOVED_RETRY or LRU_RETRY.
1249	 */
1250	spin_unlock(lock);
1251
1252	writeback_result = zswap_writeback_entry(entry, swpentry);
1253
1254	if (writeback_result) {
1255		zswap_reject_reclaim_fail++;
1256		ret = LRU_RETRY;
1257
1258		/*
1259		 * Encountering a page already in swap cache is a sign that we are shrinking
1260		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1261		 * shrinker context).
1262		 */
1263		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1264			ret = LRU_STOP;
1265			*encountered_page_in_swapcache = true;
1266		}
1267	} else {
1268		zswap_written_back_pages++;
1269	}
1270
1271	spin_lock(lock);
1272	return ret;
1273}
1274
1275static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1276		struct shrink_control *sc)
1277{
1278	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1279	unsigned long shrink_ret, nr_protected, lru_size;
1280	bool encountered_page_in_swapcache = false;
1281
1282	if (!zswap_shrinker_enabled ||
1283			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1284		sc->nr_scanned = 0;
1285		return SHRINK_STOP;
1286	}
1287
1288	nr_protected =
1289		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1290	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
1291
1292	/*
1293	 * Abort if we are shrinking into the protected region.
1294	 *
1295	 * This short-circuiting is necessary because if we have too many
1296	 * concurrent reclaimers reading the freeable zswap object count at the
1297	 * same time (before any of them has made reasonable progress), the total
1298	 * number of reclaimed objects might exceed the number of unprotected
1299	 * objects (i.e. the reclaimers will reclaim into the protected area of
1300	 * the zswap LRU).
1301	 */
1302	if (nr_protected >= lru_size - sc->nr_to_scan) {
1303		sc->nr_scanned = 0;
1304		return SHRINK_STOP;
1305	}
1306
1307	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1308		&encountered_page_in_swapcache);
1309
1310	if (encountered_page_in_swapcache)
1311		return SHRINK_STOP;
1312
1313	return shrink_ret ? shrink_ret : SHRINK_STOP;
1314}
1315
1316static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1317		struct shrink_control *sc)
1318{
1319	struct mem_cgroup *memcg = sc->memcg;
1320	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1321	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1322
1323	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1324		return 0;
1325
1326	/*
1327	 * The shrinker resumes swap writeback, which will enter the block
1328	 * layer and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1329	 * rules (may_enter_fs()), which apply on a per-folio basis.
1330	 */
1331	if (!gfp_has_io_fs(sc->gfp_mask))
1332		return 0;
1333
1334	/*
1335	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1336	 * have them per-node and thus per-lruvec. Careful if memcg is
1337	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1338	 * for the lruvec, but not for memcg_page_state().
1339	 *
1340	 * Without memcg, use the zswap pool-wide metrics.
1341	 */
1342	if (!mem_cgroup_disabled()) {
1343		mem_cgroup_flush_stats(memcg);
1344		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1345		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1346	} else {
1347		nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
1348		nr_stored = atomic_read(&zswap_nr_stored);
1349	}
1350
1351	if (!nr_stored)
1352		return 0;
1353
1354	nr_protected =
1355		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1356	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1357	/*
1358	 * Subtract from the lru size an estimate of the number of pages
1359	 * that should be protected.
1360	 */
1361	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1362
1363	/*
1364	 * Scale the number of freeable pages by the memory saving factor.
1365	 * This ensures that the better zswap compresses memory, the fewer
1366	 * pages we will evict to swap (as writeback would otherwise incur
1367	 * IO for a relatively small memory saving).
1368	 */
1369	return mult_frac(nr_freeable, nr_backing, nr_stored);
1370}
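    /*
     * Worked example with illustrative numbers: nr_freeable = 1000 entries
     * backed by nr_backing = 250 pool pages for nr_stored = 1000 stored
     * pages yields mult_frac(1000, 250, 1000) = 250, i.e. at a 4:1
     * compression ratio only a quarter of the LRU size is reported freeable.
     */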
1371
1372static struct shrinker *zswap_alloc_shrinker(void)
1373{
1374	struct shrinker *shrinker;
1375
1376	shrinker =
1377		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1378	if (!shrinker)
1379		return NULL;
1380
1381	shrinker->scan_objects = zswap_shrinker_scan;
1382	shrinker->count_objects = zswap_shrinker_count;
1383	shrinker->batch = 0;
1384	shrinker->seeks = DEFAULT_SEEKS;
1385	return shrinker;
1386}
1387
1388static int shrink_memcg(struct mem_cgroup *memcg)
1389{
1390	int nid, shrunk = 0;
1391
1392	if (!mem_cgroup_zswap_writeback_enabled(memcg))
1393		return -EINVAL;
1394
1395	/*
1396	 * Skip zombies because their LRUs are reparented and we would be
1397	 * reclaiming from the parent instead of the dead memcg.
1398	 */
1399	if (memcg && !mem_cgroup_online(memcg))
1400		return -ENOENT;
1401
1402	for_each_node_state(nid, N_NORMAL_MEMORY) {
1403		unsigned long nr_to_walk = 1;
1404
1405		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1406					    &shrink_memcg_cb, NULL, &nr_to_walk);
1407	}
1408	return shrunk ? 0 : -EAGAIN;
1409}
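    /*
     * With nr_to_walk = 1, each call writes back at most one entry per
     * online node; shrink_worker() below keeps calling this until
     * zswap_can_accept() is satisfied or the retry limit is hit.
     */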
1410
1411static void shrink_worker(struct work_struct *w)
1412{
1413	struct mem_cgroup *memcg;
1414	int ret, failures = 0;
1415
1416	/* global reclaim will select cgroup in a round-robin fashion. */
1417	do {
1418		spin_lock(&zswap_shrink_lock);
1419		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1420		memcg = zswap_next_shrink;
1421
1422		/*
1423		 * We need to retry if we have gone through a full round trip, or if we
1424		 * got an offline memcg (or else we risk undoing the effect of the
1425		 * zswap memcg offlining cleanup callback). This is not catastrophic
1426		 * per se, but it will keep the now offlined memcg hostage for a while.
1427		 *
1428		 * Note that if we got an online memcg, we will keep the extra
1429		 * reference in case the original reference obtained by mem_cgroup_iter
1430		 * is dropped by the zswap memcg offlining callback, ensuring that the
1431		 * memcg is not killed when we are reclaiming.
1432		 */
1433		if (!memcg) {
1434			spin_unlock(&zswap_shrink_lock);
1435			if (++failures == MAX_RECLAIM_RETRIES)
1436				break;
1437
1438			goto resched;
1439		}
1440
1441		if (!mem_cgroup_tryget_online(memcg)) {
1442			/* drop the reference from mem_cgroup_iter() */
1443			mem_cgroup_iter_break(NULL, memcg);
1444			zswap_next_shrink = NULL;
1445			spin_unlock(&zswap_shrink_lock);
1446
1447			if (++failures == MAX_RECLAIM_RETRIES)
1448				break;
1449
1450			goto resched;
1451		}
1452		spin_unlock(&zswap_shrink_lock);
1453
1454		ret = shrink_memcg(memcg);
1455		/* drop the extra reference */
1456		mem_cgroup_put(memcg);
1457
1458		if (ret == -EINVAL)
1459			break;
1460		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1461			break;
1462
1463resched:
1464		cond_resched();
1465	} while (!zswap_can_accept());
1466}
1467
1468static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1469{
1470	unsigned long *page;
1471	unsigned long val;
1472	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1473
1474	page = (unsigned long *)ptr;
1475	val = page[0];
1476
1477	if (val != page[last_pos])
1478		return 0;
1479
1480	for (pos = 1; pos < last_pos; pos++) {
1481		if (val != page[pos])
1482			return 0;
1483	}
1484
1485	*value = val;
1486
1487	return 1;
1488}
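    /*
     * E.g. a zeroed page returns 1 with *value == 0 and is later recreated
     * by zswap_fill_page() without involving the compressor at all. The
     * last word is checked before the full scan, presumably to fail fast
     * on pages that are uniform except near the end.
     */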
1489
1490static void zswap_fill_page(void *ptr, unsigned long value)
1491{
1492	unsigned long *page;
1493
1494	page = (unsigned long *)ptr;
1495	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1496}
1497
1498bool zswap_store(struct folio *folio)
1499{
1500	swp_entry_t swp = folio->swap;
1501	pgoff_t offset = swp_offset(swp);
1502	struct zswap_tree *tree = swap_zswap_tree(swp);
1503	struct zswap_entry *entry, *dupentry;
1504	struct obj_cgroup *objcg = NULL;
1505	struct mem_cgroup *memcg = NULL;
1506
1507	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1508	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1509
1510	/* Large folios aren't supported */
1511	if (folio_test_large(folio))
1512		return false;
1513
1514	if (!zswap_enabled)
1515		goto check_old;
1516
1517	objcg = get_obj_cgroup_from_folio(folio);
1518	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1519		memcg = get_mem_cgroup_from_objcg(objcg);
1520		if (shrink_memcg(memcg)) {
1521			mem_cgroup_put(memcg);
1522			goto reject;
1523		}
1524		mem_cgroup_put(memcg);
1525	}
1526
1527	/* reclaim space if needed */
1528	if (zswap_is_full()) {
1529		zswap_pool_limit_hit++;
1530		zswap_pool_reached_full = true;
1531		goto shrink;
1532	}
1533
1534	if (zswap_pool_reached_full) {
1535		if (!zswap_can_accept())
1536			goto shrink;
1537		else
1538			zswap_pool_reached_full = false;
1539	}
1540
1541	/* allocate entry */
1542	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
1543	if (!entry) {
1544		zswap_reject_kmemcache_fail++;
1545		goto reject;
1546	}
1547
1548	if (zswap_same_filled_pages_enabled) {
1549		unsigned long value;
1550		u8 *src;
1551
1552		src = kmap_local_folio(folio, 0);
1553		if (zswap_is_page_same_filled(src, &value)) {
1554			kunmap_local(src);
1555			entry->length = 0;
1556			entry->value = value;
1557			atomic_inc(&zswap_same_filled_pages);
1558			goto insert_entry;
1559		}
1560		kunmap_local(src);
1561	}
1562
1563	if (!zswap_non_same_filled_pages_enabled)
1564		goto freepage;
1565
1566	/* if entry is successfully added, it keeps the reference */
1567	entry->pool = zswap_pool_current_get();
1568	if (!entry->pool)
1569		goto freepage;
1570
1571	if (objcg) {
1572		memcg = get_mem_cgroup_from_objcg(objcg);
1573		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1574			mem_cgroup_put(memcg);
1575			goto put_pool;
1576		}
1577		mem_cgroup_put(memcg);
1578	}
1579
1580	if (!zswap_compress(folio, entry))
1581		goto put_pool;
1582
1583insert_entry:
1584	entry->swpentry = swp;
1585	entry->objcg = objcg;
1586	if (objcg) {
1587		obj_cgroup_charge_zswap(objcg, entry->length);
1588		/* Account before objcg ref is moved to tree */
1589		count_objcg_event(objcg, ZSWPOUT);
1590	}
1591
1592	/* map */
1593	spin_lock(&tree->lock);
1594	/*
1595	 * The folio may have been dirtied again, invalidate the
1596	 * possibly stale entry before inserting the new entry.
1597	 */
1598	if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1599		zswap_invalidate_entry(tree, dupentry);
1600		WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
1601	}
1602	if (entry->length) {
1603		INIT_LIST_HEAD(&entry->lru);
1604		zswap_lru_add(&zswap_list_lru, entry);
1605		atomic_inc(&zswap_nr_stored);
1606	}
1607	spin_unlock(&tree->lock);
1608
1609	/* update stats */
1610	atomic_inc(&zswap_stored_pages);
1611	zswap_update_total_size();
1612	count_vm_event(ZSWPOUT);
1613
1614	return true;
1615
1616put_pool:
1617	zswap_pool_put(entry->pool);
1618freepage:
1619	zswap_entry_cache_free(entry);
1620reject:
1621	if (objcg)
1622		obj_cgroup_put(objcg);
1623check_old:
1624	/*
1625	 * If the zswap store fails or zswap is disabled, we must invalidate the
1626	 * possibly stale entry which was previously stored at this offset.
1627	 * Otherwise, writeback could overwrite the new data in the swapfile.
1628	 */
1629	spin_lock(&tree->lock);
1630	entry = zswap_rb_search(&tree->rbroot, offset);
1631	if (entry)
1632		zswap_invalidate_entry(tree, entry);
1633	spin_unlock(&tree->lock);
1634	return false;
1635
1636shrink:
1637	queue_work(shrink_wq, &zswap_shrink_work);
1638	goto reject;
1639}
1640
1641bool zswap_load(struct folio *folio)
1642{
1643	swp_entry_t swp = folio->swap;
1644	pgoff_t offset = swp_offset(swp);
1645	struct page *page = &folio->page;
1646	bool swapcache = folio_test_swapcache(folio);
1647	struct zswap_tree *tree = swap_zswap_tree(swp);
1648	struct zswap_entry *entry;
1649	u8 *dst;
1650
1651	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1652
1653	spin_lock(&tree->lock);
1654	entry = zswap_rb_search(&tree->rbroot, offset);
1655	if (!entry) {
1656		spin_unlock(&tree->lock);
1657		return false;
1658	}
1659	/*
1660	 * When reading into the swapcache, invalidate our entry. The
1661	 * swapcache can be the authoritative owner of the page and
1662	 * its mappings, and the pressure that results from having two
1663	 * in-memory copies outweighs any benefits of caching the
1664	 * compression work.
1665	 *
1666	 * (Most swapins go through the swapcache. The notable
1667	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1668	 * swapfiles, which reads into a private page and may free it if
1669	 * the fault fails. We remain the primary owner of the entry.)
1670	 */
1671	if (swapcache)
1672		zswap_rb_erase(&tree->rbroot, entry);
1673	spin_unlock(&tree->lock);
1674
1675	if (entry->length)
1676		zswap_decompress(entry, page);
1677	else {
1678		dst = kmap_local_page(page);
1679		zswap_fill_page(dst, entry->value);
1680		kunmap_local(dst);
1681	}
1682
1683	count_vm_event(ZSWPIN);
1684	if (entry->objcg)
1685		count_objcg_event(entry->objcg, ZSWPIN);
1686
1687	if (swapcache) {
1688		zswap_entry_free(entry);
1689		folio_mark_dirty(folio);
1690	}
1691
1692	return true;
1693}
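    /*
     * Marking the folio dirty in the swapcache case ensures that, should
     * it be reclaimed again, it goes back through pageout() and
     * zswap_store(), since the compressed copy was just freed above.
     */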
1694
1695void zswap_invalidate(swp_entry_t swp)
1696{
1697	pgoff_t offset = swp_offset(swp);
1698	struct zswap_tree *tree = swap_zswap_tree(swp);
1699	struct zswap_entry *entry;
1700
1701	spin_lock(&tree->lock);
1702	entry = zswap_rb_search(&tree->rbroot, offset);
1703	if (entry)
1704		zswap_invalidate_entry(tree, entry);
1705	spin_unlock(&tree->lock);
1706}
1707
1708int zswap_swapon(int type, unsigned long nr_pages)
1709{
1710	struct zswap_tree *trees, *tree;
1711	unsigned int nr, i;
1712
1713	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1714	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1715	if (!trees) {
1716		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1717		return -ENOMEM;
1718	}
1719
1720	for (i = 0; i < nr; i++) {
1721		tree = trees + i;
1722		tree->rbroot = RB_ROOT;
1723		spin_lock_init(&tree->lock);
1724	}
1725
1726	nr_zswap_trees[type] = nr;
1727	zswap_trees[type] = trees;
1728	return 0;
1729}
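    /*
     * One tree per SWAP_ADDRESS_SPACE_PAGES worth of slots keeps each
     * rbtree and its lock small. Assuming the usual definition of
     * SWAP_ADDRESS_SPACE_PAGES as 1 << 14 (an assumption, not shown here),
     * a 1 GiB swap device with 262144 slots would get 16 trees.
     */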
1730
1731void zswap_swapoff(int type)
1732{
1733	struct zswap_tree *trees = zswap_trees[type];
1734	unsigned int i;
1735
1736	if (!trees)
1737		return;
1738
1739	/* try_to_unuse() invalidated all the entries already */
1740	for (i = 0; i < nr_zswap_trees[type]; i++)
1741		WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
1742
1743	kvfree(trees);
1744	nr_zswap_trees[type] = 0;
1745	zswap_trees[type] = NULL;
1746}
1747
1748/*********************************
1749* debugfs functions
1750**********************************/
1751#ifdef CONFIG_DEBUG_FS
1752#include <linux/debugfs.h>
1753
1754static struct dentry *zswap_debugfs_root;
1755
1756static int zswap_debugfs_init(void)
1757{
1758	if (!debugfs_initialized())
1759		return -ENODEV;
1760
1761	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1762
1763	debugfs_create_u64("pool_limit_hit", 0444,
1764			   zswap_debugfs_root, &zswap_pool_limit_hit);
1765	debugfs_create_u64("reject_reclaim_fail", 0444,
1766			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1767	debugfs_create_u64("reject_alloc_fail", 0444,
1768			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1769	debugfs_create_u64("reject_kmemcache_fail", 0444,
1770			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1771	debugfs_create_u64("reject_compress_fail", 0444,
1772			   zswap_debugfs_root, &zswap_reject_compress_fail);
1773	debugfs_create_u64("reject_compress_poor", 0444,
1774			   zswap_debugfs_root, &zswap_reject_compress_poor);
1775	debugfs_create_u64("written_back_pages", 0444,
1776			   zswap_debugfs_root, &zswap_written_back_pages);
1777	debugfs_create_u64("pool_total_size", 0444,
1778			   zswap_debugfs_root, &zswap_pool_total_size);
1779	debugfs_create_atomic_t("stored_pages", 0444,
1780				zswap_debugfs_root, &zswap_stored_pages);
1781	debugfs_create_atomic_t("same_filled_pages", 0444,
1782				zswap_debugfs_root, &zswap_same_filled_pages);
1783
1784	return 0;
1785}
1786#else
1787static int zswap_debugfs_init(void)
1788{
1789	return 0;
1790}
1791#endif
1792
1793/*********************************
1794* module init and exit
1795**********************************/
1796static int zswap_setup(void)
1797{
1798	struct zswap_pool *pool;
1799	int ret;
1800
1801	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1802	if (!zswap_entry_cache) {
1803		pr_err("entry cache creation failed\n");
1804		goto cache_fail;
1805	}
1806
1807	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1808				      "mm/zswap_pool:prepare",
1809				      zswap_cpu_comp_prepare,
1810				      zswap_cpu_comp_dead);
1811	if (ret)
1812		goto hp_fail;
1813
1814	shrink_wq = alloc_workqueue("zswap-shrink",
1815			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1816	if (!shrink_wq)
1817		goto shrink_wq_fail;
1818
1819	zswap_shrinker = zswap_alloc_shrinker();
1820	if (!zswap_shrinker)
1821		goto shrinker_fail;
1822	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1823		goto lru_fail;
1824	shrinker_register(zswap_shrinker);
1825
1826	INIT_WORK(&zswap_shrink_work, shrink_worker);
1827
1828	pool = __zswap_pool_create_fallback();
1829	if (pool) {
1830		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1831			zpool_get_type(pool->zpools[0]));
1832		list_add(&pool->list, &zswap_pools);
1833		zswap_has_pool = true;
1834	} else {
1835		pr_err("pool creation failed\n");
1836		zswap_enabled = false;
1837	}
1838
1839	if (zswap_debugfs_init())
1840		pr_warn("debugfs initialization failed\n");
1841	zswap_init_state = ZSWAP_INIT_SUCCEED;
1842	return 0;
1843
1844lru_fail:
1845	shrinker_free(zswap_shrinker);
1846shrinker_fail:
1847	destroy_workqueue(shrink_wq);
1848shrink_wq_fail:
1849	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1850hp_fail:
1851	kmem_cache_destroy(zswap_entry_cache);
1852cache_fail:
1853	/* if built-in, we aren't unloaded on failure; don't allow use */
1854	zswap_init_state = ZSWAP_INIT_FAILED;
1855	zswap_enabled = false;
1856	return -ENOMEM;
1857}
1858
1859static int __init zswap_init(void)
1860{
1861	if (!zswap_enabled)
1862		return 0;
1863	return zswap_setup();
1864}
1865/* must be late so crypto has time to come up */
1866late_initcall(zswap_init);
1867
1868MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1869MODULE_DESCRIPTION("Compressed cache for swap pages");