// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons, so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
                                   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
        .set = zswap_enabled_param_set,
        .get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
                                      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
        .set = zswap_compressor_param_set,
        .get = param_get_charp,
        .free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
                &zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
        .set = zswap_zpool_param_set,
        .get = param_get_charp,
        .free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
                   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
                   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
                   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
                CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
                CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
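
/*
 * Illustrative usage note: as module parameters, all of the tunables above
 * are exposed under /sys/module/zswap/parameters/ at runtime, e.g. (the
 * values shown are examples only):
 *
 *   echo 1    > /sys/module/zswap/parameters/enabled
 *   echo zstd > /sys/module/zswap/parameters/compressor
 *   echo 25   > /sys/module/zswap/parameters/max_pool_percent
 */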

bool is_zswap_enabled(void)
{
        return zswap_enabled;
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        struct crypto_wait wait;
        u8 *buffer;
        struct mutex mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
        struct zpool *zpools[ZSWAP_NR_ZPOOLS];
        struct crypto_acomp_ctx __percpu *acomp_ctx;
        struct kref kref;
        struct list_head list;
        struct work_struct release_work;
        struct work_struct shrink_work;
        struct hlist_node node;
        char tfm_name[CRYPTO_MAX_ALG_NAME];
        struct list_lru list_lru;
        struct mem_cgroup *next_shrink;
        struct shrinker *shrinker;
        atomic_t nr_stored;
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback. The lock for the
 *            zswap_tree structure that contains the entry must be held while
 *            changing the refcount. Since the lock must be held, there is
 *            no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page, length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - the value of a same-value filled page
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
        struct rb_node rbnode;
        swp_entry_t swpentry;
        int refcount;
        unsigned int length;
        struct zswap_pool *pool;
        union {
                unsigned long handle;
                unsigned long value;
        };
        struct obj_cgroup *objcg;
        struct list_head lru;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
        ZSWAP_UNINIT,
        ZSWAP_INIT_SUCCEED,
        ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)                                \
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
                 zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
                                 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static bool zswap_is_full(void)
{
        return totalram_pages() * zswap_max_pool_percent / 100 <
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
        return totalram_pages() * zswap_accept_thr_percent / 100 *
                                zswap_max_pool_percent / 100 >
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
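
/*
 * Worked example of the two limits above, with illustrative numbers:
 * assuming totalram_pages() == 1048576 (4 GiB of 4 KiB pages),
 * zswap_max_pool_percent == 20 and zswap_accept_thr_percent == 90:
 *
 *   pool limit       = 1048576 * 20 / 100            = 209715 pages
 *   accept threshold = 1048576 * 90 / 100 * 20 / 100 = 188743 pages
 *
 * Stores are rejected once the compressed pool exceeds ~209715 pages, and
 * are accepted again only after it shrinks back below ~188743 pages.
 */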

static u64 get_zswap_pool_size(struct zswap_pool *pool)
{
        u64 pool_size = 0;
        int i;

        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
                pool_size += zpool_get_total_size(pool->zpools[i]);

        return pool_size;
}

static void zswap_update_total_size(void)
{
        struct zswap_pool *pool;
        u64 total = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                total += get_zswap_pool_size(pool);

        rcu_read_unlock();

        zswap_pool_total_size = total;
}

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
        return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
        return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
        return page_to_nid(virt_to_page(entry));
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
        struct zswap_pool *pool;

        /* lock out zswap pools list modification */
        spin_lock(&zswap_pools_lock);
        list_for_each_entry(pool, &zswap_pools, list) {
                if (pool->next_shrink == memcg)
                        pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
        }
        spin_unlock(&zswap_pools_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
        struct zswap_entry *entry;
        entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
        if (!entry)
                return NULL;
        entry->refcount = 1;
        RB_CLEAR_NODE(&entry->rbnode);
        return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
        kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* zswap lruvec functions
**********************************/
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
        atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
        struct lruvec *lruvec;

        VM_WARN_ON_ONCE(!folio_test_locked(folio));
        lruvec = folio_lruvec(folio);
        atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
}

/*********************************
* lru functions
**********************************/
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
        atomic_long_t *nr_zswap_protected;
        unsigned long lru_size, old, new;
        int nid = entry_to_nid(entry);
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        /*
         * Note that it is safe to use rcu_read_lock() here, even in the face of
         * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
         * used in list_lru lookup, only two scenarios are possible:
         *
         * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
         *    new entry will be reparented to memcg's parent's list_lru.
         * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
         *    new entry will be added directly to memcg's parent's list_lru.
         *
         * Similar reasoning holds for list_lru_del() and list_lru_putback().
         */
        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* will always succeed */
        list_lru_add(list_lru, &entry->lru, nid, memcg);

        /* Update the protection area */
        lru_size = list_lru_count_one(list_lru, nid, memcg);
        lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
        nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
        old = atomic_long_inc_return(nr_zswap_protected);
        /*
         * Decay to avoid overflow and adapt to changing workloads.
         * This is based on LRU reclaim cost decaying heuristics.
         */
        do {
                new = old > lru_size / 4 ? old / 2 : old;
        } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
        rcu_read_unlock();
}
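
/*
 * Illustrative trace of the decay loop above: with lru_size == 1000, the
 * protection counter grows by one per zswap_lru_add() until it passes
 * lru_size / 4 == 250; the next update then halves it (e.g. 251 -> 125),
 * so nr_zswap_protected oscillates in roughly the 125..251 band instead
 * of growing without bound.
 */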

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
        int nid = entry_to_nid(entry);
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* will always succeed */
        list_lru_del(list_lru, &entry->lru, nid, memcg);
        rcu_read_unlock();
}

static void zswap_lru_putback(struct list_lru *list_lru,
                              struct zswap_entry *entry)
{
        int nid = entry_to_nid(entry);
        spinlock_t *lock = &list_lru->node[nid].lock;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        spin_lock(lock);
        /* we cannot use list_lru_add here, because it increments the node's lru count */
        list_lru_putback(list_lru, &entry->lru, nid, memcg);
        spin_unlock(lock);

        lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
        /* increment the protection area to account for the LRU rotation. */
        atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
        rcu_read_unlock();
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
        struct rb_node *node = root->rb_node;
        struct zswap_entry *entry;
        pgoff_t entry_offset;

        while (node) {
                entry = rb_entry(node, struct zswap_entry, rbnode);
                entry_offset = swp_offset(entry->swpentry);
                if (entry_offset > offset)
                        node = node->rb_left;
                else if (entry_offset < offset)
                        node = node->rb_right;
                else
                        return entry;
        }
        return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
                           struct zswap_entry **dupentry)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        struct zswap_entry *myentry;
        pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

        while (*link) {
                parent = *link;
                myentry = rb_entry(parent, struct zswap_entry, rbnode);
                myentry_offset = swp_offset(myentry->swpentry);
                if (myentry_offset > entry_offset)
                        link = &(*link)->rb_left;
                else if (myentry_offset < entry_offset)
                        link = &(*link)->rb_right;
                else {
                        *dupentry = myentry;
                        return -EEXIST;
                }
        }
        rb_link_node(&entry->rbnode, parent, link);
        rb_insert_color(&entry->rbnode, root);
        return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
        if (!RB_EMPTY_NODE(&entry->rbnode)) {
                rb_erase(&entry->rbnode, root);
                RB_CLEAR_NODE(&entry->rbnode);
                return true;
        }
        return false;
}

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
        int i = 0;

        if (ZSWAP_NR_ZPOOLS > 1)
                i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

        return entry->pool->zpools[i];
}
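
/*
 * Illustrative note: hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS)) yields a
 * 5-bit index here (ZSWAP_NR_ZPOOLS == 32), so entries are spread across
 * the 32 zpools by their metadata address, reducing contention on any
 * single zpool.
 */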

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
        if (!entry->length)
                atomic_dec(&zswap_same_filled_pages);
        else {
                zswap_lru_del(&entry->pool->list_lru, entry);
                zpool_free(zswap_find_zpool(entry), entry->handle);
                atomic_dec(&entry->pool->nr_stored);
                zswap_pool_put(entry->pool);
        }
        if (entry->objcg) {
                obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
                obj_cgroup_put(entry->objcg);
        }
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
        entry->refcount++;
}

/*
 * caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
                            struct zswap_entry *entry)
{
        int refcount = --entry->refcount;

        WARN_ON_ONCE(refcount < 0);
        if (refcount == 0) {
                WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
                zswap_free_entry(entry);
        }
}
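
/*
 * Illustrative lifecycle of the refcount above: a stored entry starts with
 * refcount == 1 (the tree's reference). zswap_load() takes a second,
 * temporary reference around decompression, so a concurrent invalidation
 * that erases the entry from the tree drops only the tree's reference;
 * the entry is actually freed when the loader's zswap_entry_put() brings
 * the count to zero.
 */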

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
                                pgoff_t offset)
{
        struct zswap_entry *entry;

        entry = zswap_rb_search(root, offset);
        if (entry)
                zswap_entry_get(entry);

        return entry;
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
                                       spinlock_t *lock, void *arg);

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
                struct shrink_control *sc)
{
        struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
        unsigned long shrink_ret, nr_protected, lru_size;
        struct zswap_pool *pool = shrinker->private_data;
        bool encountered_page_in_swapcache = false;

        if (!zswap_shrinker_enabled ||
            !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
                sc->nr_scanned = 0;
                return SHRINK_STOP;
        }

        nr_protected =
                atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
        lru_size = list_lru_shrink_count(&pool->list_lru, sc);

        /*
         * Abort if we are shrinking into the protected region.
         *
         * This short-circuiting is necessary because if we have too many
         * concurrent reclaimers getting the freeable zswap object counts at
         * the same time (before any of them made reasonable progress), the
         * total number of reclaimed objects might be more than the number of
         * unprotected objects (i.e. the reclaimers will reclaim into the
         * protected area of the zswap LRU).
         */
        if (nr_protected >= lru_size - sc->nr_to_scan) {
                sc->nr_scanned = 0;
                return SHRINK_STOP;
        }

        shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
                &encountered_page_in_swapcache);

        if (encountered_page_in_swapcache)
                return SHRINK_STOP;

        return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
                struct shrink_control *sc)
{
        struct zswap_pool *pool = shrinker->private_data;
        struct mem_cgroup *memcg = sc->memcg;
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
        unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

        if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
                return 0;

#ifdef CONFIG_MEMCG_KMEM
        mem_cgroup_flush_stats(memcg);
        nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
        nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
#else
        /* use pool stats instead of memcg stats */
        nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
        nr_stored = atomic_read(&pool->nr_stored);
#endif

        if (!nr_stored)
                return 0;

        nr_protected =
                atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
        nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
        /*
         * Subtract from the lru size an estimate of the number of pages
         * that should be protected.
         */
        nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

        /*
         * Scale the number of freeable pages by the memory saving factor.
         * This ensures that the better zswap compresses memory, the fewer
         * pages we will evict to swap (as it will otherwise incur IO for
         * relatively small memory saving).
         */
        return mult_frac(nr_freeable, nr_backing, nr_stored);
}
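
/*
 * Worked example for the scaling above, with illustrative numbers: if a
 * memcg has nr_stored == 1000 compressed pages backed by nr_backing ==
 * 250 pages of pool memory (a 4:1 compression ratio) and nr_freeable ==
 * 400 after subtracting protection, the shrinker reports
 * mult_frac(400, 250, 1000) == 100 freeable objects. The better the
 * compression, the less eagerly zswap entries are written back.
 */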

static void zswap_alloc_shrinker(struct zswap_pool *pool)
{
        pool->shrinker =
                shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
        if (!pool->shrinker)
                return;

        pool->shrinker->private_data = pool;
        pool->shrinker->scan_objects = zswap_shrinker_scan;
        pool->shrinker->count_objects = zswap_shrinker_count;
        pool->shrinker->batch = 0;
        pool->shrinker->seeks = DEFAULT_SEEKS;
}

/*********************************
* per-cpu code
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        int ret;

        mutex_init(&acomp_ctx->mutex);

        acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
        if (!acomp_ctx->buffer)
                return -ENOMEM;

        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
        if (IS_ERR(acomp)) {
                pr_err("could not alloc crypto acomp %s : %ld\n",
                       pool->tfm_name, PTR_ERR(acomp));
                ret = PTR_ERR(acomp);
                goto acomp_fail;
        }
        acomp_ctx->acomp = acomp;

        req = acomp_request_alloc(acomp_ctx->acomp);
        if (!req) {
                pr_err("could not alloc crypto acomp_request %s\n",
                       pool->tfm_name);
                ret = -ENOMEM;
                goto req_fail;
        }
        acomp_ctx->req = req;

        crypto_init_wait(&acomp_ctx->wait);
        /*
         * if the backend of acomp is async zip, crypto_req_done() will wake up
         * crypto_wait_req(); if the backend of acomp is scomp, the callback
         * won't be called and crypto_wait_req() will return without blocking.
         */
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &acomp_ctx->wait);

        return 0;

req_fail:
        crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
        kfree(acomp_ctx->buffer);
        return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

        if (!IS_ERR_OR_NULL(acomp_ctx)) {
                if (!IS_ERR_OR_NULL(acomp_ctx->req))
                        acomp_request_free(acomp_ctx->req);
                if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
                        crypto_free_acomp(acomp_ctx->acomp);
                kfree(acomp_ctx->buffer);
        }

        return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
        struct zswap_pool *pool;

        pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
        WARN_ONCE(!pool && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);

        return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
        assert_spin_locked(&zswap_pools_lock);

        return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
        struct zswap_pool *pool;

        rcu_read_lock();

        pool = __zswap_pool_current();
        if (!zswap_pool_get(pool))
                pool = NULL;

        rcu_read_unlock();

        return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
        struct zswap_pool *pool, *last = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                last = pool;
        WARN_ONCE(!last && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);
        if (!zswap_pool_get(last))
                last = NULL;

        rcu_read_unlock();

        return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
        struct zswap_pool *pool;

        assert_spin_locked(&zswap_pools_lock);

        list_for_each_entry_rcu(pool, &zswap_pools, list) {
                if (strcmp(pool->tfm_name, compressor))
                        continue;
                /* all zpools share the same type */
                if (strcmp(zpool_get_type(pool->zpools[0]), type))
                        continue;
                /* if we can't get it, it's about to be destroyed */
                if (!zswap_pool_get(pool))
                        continue;
                return pool;
        }

        return NULL;
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
                                   struct zswap_entry *entry)
{
        if (zswap_rb_erase(&tree->rbroot, entry))
                zswap_entry_put(tree, entry);
}

static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
                                       spinlock_t *lock, void *arg)
{
        struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
        bool *encountered_page_in_swapcache = (bool *)arg;
        struct zswap_tree *tree;
        pgoff_t swpoffset;
        enum lru_status ret = LRU_REMOVED_RETRY;
        int writeback_result;

        /*
         * Once the lru lock is dropped, the entry might get freed. The
         * swpoffset is copied to the stack, and entry isn't deref'd again
         * until the entry is verified to still be alive in the tree.
         */
        swpoffset = swp_offset(entry->swpentry);
        tree = zswap_trees[swp_type(entry->swpentry)];
        list_lru_isolate(l, item);
        /*
         * It's safe to drop the lock here because we return either
         * LRU_REMOVED_RETRY or LRU_RETRY.
         */
        spin_unlock(lock);

        /* Check for invalidate() race */
        spin_lock(&tree->lock);
        if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
                goto unlock;

        /* Hold a reference to prevent a free during writeback */
        zswap_entry_get(entry);
        spin_unlock(&tree->lock);

        writeback_result = zswap_writeback_entry(entry, tree);

        spin_lock(&tree->lock);
        if (writeback_result) {
                zswap_reject_reclaim_fail++;
                zswap_lru_putback(&entry->pool->list_lru, entry);
                ret = LRU_RETRY;

                /*
                 * Encountering a page already in swap cache is a sign that we are shrinking
                 * into the warmer region. We should terminate shrinking (if we're in the dynamic
                 * shrinker context).
                 */
                if (writeback_result == -EEXIST && encountered_page_in_swapcache)
                        *encountered_page_in_swapcache = true;

                goto put_unlock;
        }
        zswap_written_back_pages++;

        if (entry->objcg)
                count_objcg_event(entry->objcg, ZSWPWB);

        count_vm_event(ZSWPWB);
        /*
         * Writeback started successfully, the page now belongs to the
         * swapcache. Drop the entry from zswap - unless invalidate already
         * took it out while we had the tree->lock released for IO.
         */
        zswap_invalidate_entry(tree, entry);

put_unlock:
        /* Drop local reference */
        zswap_entry_put(tree, entry);
unlock:
        spin_unlock(&tree->lock);
        spin_lock(lock);
        return ret;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
        struct zswap_pool *pool;
        int nid, shrunk = 0;

        if (!mem_cgroup_zswap_writeback_enabled(memcg))
                return -EINVAL;

        /*
         * Skip zombies because their LRUs are reparented and we would be
         * reclaiming from the parent instead of the dead memcg.
         */
        if (memcg && !mem_cgroup_online(memcg))
                return -ENOENT;

        pool = zswap_pool_current_get();
        if (!pool)
                return -EINVAL;

        for_each_node_state(nid, N_NORMAL_MEMORY) {
                unsigned long nr_to_walk = 1;

                shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
                                            &shrink_memcg_cb, NULL, &nr_to_walk);
        }
        zswap_pool_put(pool);
        return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
        struct zswap_pool *pool = container_of(w, typeof(*pool),
                                                shrink_work);
        struct mem_cgroup *memcg;
        int ret, failures = 0;

        /* global reclaim will select cgroup in a round-robin fashion. */
        do {
                spin_lock(&zswap_pools_lock);
                pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
                memcg = pool->next_shrink;

                /*
                 * We need to retry if we have gone through a full round trip, or if we
                 * got an offline memcg (or else we risk undoing the effect of the
                 * zswap memcg offlining cleanup callback). This is not catastrophic
                 * per se, but it will keep the now offlined memcg hostage for a while.
                 *
                 * Note that if we got an online memcg, we will keep the extra
                 * reference in case the original reference obtained by mem_cgroup_iter
                 * is dropped by the zswap memcg offlining callback, ensuring that the
                 * memcg is not killed when we are reclaiming.
                 */
                if (!memcg) {
                        spin_unlock(&zswap_pools_lock);
                        if (++failures == MAX_RECLAIM_RETRIES)
                                break;

                        goto resched;
                }

                if (!mem_cgroup_tryget_online(memcg)) {
                        /* drop the reference from mem_cgroup_iter() */
                        mem_cgroup_iter_break(NULL, memcg);
                        pool->next_shrink = NULL;
                        spin_unlock(&zswap_pools_lock);

                        if (++failures == MAX_RECLAIM_RETRIES)
                                break;

                        goto resched;
                }
                spin_unlock(&zswap_pools_lock);

                ret = shrink_memcg(memcg);
                /* drop the extra reference */
                mem_cgroup_put(memcg);

                if (ret == -EINVAL)
                        break;
                if (ret && ++failures == MAX_RECLAIM_RETRIES)
                        break;

resched:
                cond_resched();
        } while (!zswap_can_accept());
        zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
        int i;
        struct zswap_pool *pool;
        char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        int ret;

        if (!zswap_has_pool) {
                /* if either is unset, pool initialization failed, and we
                 * need both params to be set correctly before trying to
                 * create a pool.
                 */
                if (!strcmp(type, ZSWAP_PARAM_UNSET))
                        return NULL;
                if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
                        return NULL;
        }

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
                /* unique name for each pool specifically required by zsmalloc */
                snprintf(name, 38, "zswap%x",
                         atomic_inc_return(&zswap_pools_count));

                pool->zpools[i] = zpool_create_pool(type, name, gfp);
                if (!pool->zpools[i]) {
                        pr_err("%s zpool not available\n", type);
                        goto error;
                }
        }
        pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

        strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

        pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
        if (!pool->acomp_ctx) {
                pr_err("percpu alloc failed\n");
                goto error;
        }

        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
                                       &pool->node);
        if (ret)
                goto error;

        zswap_alloc_shrinker(pool);
        if (!pool->shrinker)
                goto error;

        pr_debug("using %s compressor\n", pool->tfm_name);

        /* being the current pool takes 1 ref; this func expects the
         * caller to always add the new pool as the current pool
         */
        kref_init(&pool->kref);
        INIT_LIST_HEAD(&pool->list);
        if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
                goto lru_fail;
        shrinker_register(pool->shrinker);
        INIT_WORK(&pool->shrink_work, shrink_worker);
        atomic_set(&pool->nr_stored, 0);

        zswap_pool_debug("created", pool);

        return pool;

lru_fail:
        list_lru_destroy(&pool->list_lru);
        shrinker_free(pool->shrinker);
error:
        if (pool->acomp_ctx)
                free_percpu(pool->acomp_ctx);
        while (i--)
                zpool_destroy_pool(pool->zpools[i]);
        kfree(pool);
        return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
        bool has_comp, has_zpool;

        has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        if (!has_comp && strcmp(zswap_compressor,
                                CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
                pr_err("compressor %s not available, using default %s\n",
                       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
                param_free_charp(&zswap_compressor);
                zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
                has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        }
        if (!has_comp) {
                pr_err("default compressor %s not available\n",
                       zswap_compressor);
                param_free_charp(&zswap_compressor);
                zswap_compressor = ZSWAP_PARAM_UNSET;
        }

        has_zpool = zpool_has_pool(zswap_zpool_type);
        if (!has_zpool && strcmp(zswap_zpool_type,
                                 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
                pr_err("zpool %s not available, using default %s\n",
                       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
                has_zpool = zpool_has_pool(zswap_zpool_type);
        }
        if (!has_zpool) {
                pr_err("default zpool %s not available\n",
                       zswap_zpool_type);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = ZSWAP_PARAM_UNSET;
        }

        if (!has_comp || !has_zpool)
                return NULL;

        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
        int i;

        zswap_pool_debug("destroying", pool);

        shrinker_free(pool->shrinker);
        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
        free_percpu(pool->acomp_ctx);
        list_lru_destroy(&pool->list_lru);

        spin_lock(&zswap_pools_lock);
        mem_cgroup_iter_break(NULL, pool->next_shrink);
        pool->next_shrink = NULL;
        spin_unlock(&zswap_pools_lock);

        for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
                zpool_destroy_pool(pool->zpools[i]);
        kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
        if (!pool)
                return 0;

        return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
        struct zswap_pool *pool = container_of(work, typeof(*pool),
                                                release_work);

        synchronize_rcu();

        /* nobody should have been able to get a kref... */
        WARN_ON(kref_get_unless_zero(&pool->kref));

        /* pool is now off zswap_pools list and has no references. */
        zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
        struct zswap_pool *pool;

        pool = container_of(kref, typeof(*pool), kref);

        spin_lock(&zswap_pools_lock);

        WARN_ON(pool == zswap_pool_current());

        list_del_rcu(&pool->list);

        INIT_WORK(&pool->release_work, __zswap_pool_release);
        schedule_work(&pool->release_work);

        spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
        kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
        /* no change required */
        if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
                return false;
        return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                             char *type, char *compressor)
{
        struct zswap_pool *pool, *put_pool = NULL;
        char *s = strstrip((char *)val);
        int ret = 0;
        bool new_pool = false;

        mutex_lock(&zswap_init_lock);
        switch (zswap_init_state) {
        case ZSWAP_UNINIT:
                /* if this is load-time (pre-init) param setting,
                 * don't create a pool; that's done during init.
                 */
                ret = param_set_charp(s, kp);
                break;
        case ZSWAP_INIT_SUCCEED:
                new_pool = zswap_pool_changed(s, kp);
                break;
        case ZSWAP_INIT_FAILED:
                pr_err("can't set param, initialization failed\n");
                ret = -ENODEV;
        }
        mutex_unlock(&zswap_init_lock);

        /* no need to create a new pool, return directly */
        if (!new_pool)
                return ret;

        if (!type) {
                if (!zpool_has_pool(s)) {
                        pr_err("zpool %s not available\n", s);
                        return -ENOENT;
                }
                type = s;
        } else if (!compressor) {
                if (!crypto_has_acomp(s, 0, 0)) {
                        pr_err("compressor %s not available\n", s);
                        return -ENOENT;
                }
                compressor = s;
        } else {
                WARN_ON(1);
                return -EINVAL;
        }

        spin_lock(&zswap_pools_lock);

        pool = zswap_pool_find_get(type, compressor);
        if (pool) {
                zswap_pool_debug("using existing", pool);
                WARN_ON(pool == zswap_pool_current());
                list_del_rcu(&pool->list);
        }

        spin_unlock(&zswap_pools_lock);

        if (!pool)
                pool = zswap_pool_create(type, compressor);

        if (pool)
                ret = param_set_charp(s, kp);
        else
                ret = -EINVAL;

        spin_lock(&zswap_pools_lock);

        if (!ret) {
                put_pool = zswap_pool_current();
                list_add_rcu(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else if (pool) {
                /* add the possibly pre-existing pool to the end of the pools
                 * list; if it's new (and empty) then it'll be removed and
                 * destroyed by the put after we drop the lock
                 */
                list_add_tail_rcu(&pool->list, &zswap_pools);
                put_pool = pool;
        }

        spin_unlock(&zswap_pools_lock);

        if (!zswap_has_pool && !pool) {
                /* if initial pool creation failed, and this pool creation also
                 * failed, maybe both compressor and zpool params were bad.
                 * Allow changing this param, so pool creation will succeed
                 * when the other param is changed. We already verified this
                 * param is ok in the zpool_has_pool() or crypto_has_acomp()
                 * checks above.
                 */
                ret = param_set_charp(s, kp);
        }

        /* drop the ref from either the old current pool,
         * or the new pool we failed to add
         */
        if (put_pool)
                zswap_pool_put(put_pool);

        return ret;
}

static int zswap_compressor_param_set(const char *val,
                                      const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
                                 const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
                                   const struct kernel_param *kp)
{
        int ret = -ENODEV;

        /* if this is load-time (pre-init) param setting, only set param. */
        if (system_state != SYSTEM_RUNNING)
                return param_set_bool(val, kp);

        mutex_lock(&zswap_init_lock);
        switch (zswap_init_state) {
        case ZSWAP_UNINIT:
                if (zswap_setup())
                        break;
                fallthrough;
        case ZSWAP_INIT_SUCCEED:
                if (!zswap_has_pool)
                        pr_err("can't enable, no pool configured\n");
                else
                        ret = param_set_bool(val, kp);
                break;
        case ZSWAP_INIT_FAILED:
                pr_err("can't enable, initialization failed\n");
        }
        mutex_unlock(&zswap_init_lock);

        return ret;
}

static void __zswap_load(struct zswap_entry *entry, struct page *page)
{
        struct zpool *zpool = zswap_find_zpool(entry);
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
        u8 *src;

        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
        mutex_lock(&acomp_ctx->mutex);

        src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
        if (!zpool_can_sleep_mapped(zpool)) {
                memcpy(acomp_ctx->buffer, src, entry->length);
                src = acomp_ctx->buffer;
                zpool_unmap_handle(zpool, entry->handle);
        }

        sg_init_one(&input, src, entry->length);
        sg_init_table(&output, 1);
        sg_set_page(&output, page, PAGE_SIZE, 0);
        acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
        BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
        BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
        mutex_unlock(&acomp_ctx->mutex);

        if (zpool_can_sleep_mapped(zpool))
                zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
                                 struct zswap_tree *tree)
{
        swp_entry_t swpentry = entry->swpentry;
        struct folio *folio;
        struct mempolicy *mpol;
        bool folio_was_allocated;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
        };

        /* try to allocate swap cache folio */
        mpol = get_task_policy(current);
        folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
                                NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
        if (!folio)
                return -ENOMEM;

        /*
         * Found an existing folio, we raced with load/swapin. We generally
         * writeback cold folios from zswap, and swapin means the folio just
         * became hot. Skip this folio and let the caller find another one.
         */
        if (!folio_was_allocated) {
                folio_put(folio);
                return -EEXIST;
        }

        /*
         * folio is locked, and the swapcache is now secured against
         * concurrent swapping to and from the slot. Verify that the
         * swap entry hasn't been invalidated and recycled behind our
         * backs (our zswap_entry reference doesn't prevent that), to
         * avoid overwriting a new swap folio with old compressed data.
         */
        spin_lock(&tree->lock);
        if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
                spin_unlock(&tree->lock);
                delete_from_swap_cache(folio);
                folio_unlock(folio);
                folio_put(folio);
                return -ENOMEM;
        }
        spin_unlock(&tree->lock);

        __zswap_load(entry, &folio->page);

        /* folio is up to date */
        folio_mark_uptodate(folio);

        /* move it to the tail of the inactive list after end_writeback */
        folio_set_reclaim(folio);

        /* start writeback */
        __swap_writepage(folio, &wbc);
        folio_put(folio);

        return 0;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
        unsigned long *page;
        unsigned long val;
        unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

        page = (unsigned long *)ptr;
        val = page[0];

        if (val != page[last_pos])
                return 0;

        for (pos = 1; pos < last_pos; pos++) {
                if (val != page[pos])
                        return 0;
        }

        *value = val;

        return 1;
}
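
/*
 * Illustrative example of the check above: a page whose bytes repeat at
 * unsigned-long granularity is detected as same-filled, e.g. after
 * memset(ptr, 0xAA, PAGE_SIZE) the function returns 1 with *value ==
 * 0xAAAAAAAAAAAAAAAAUL on 64-bit. A pattern whose period is not a
 * multiple of sizeof(unsigned long) is not detected.
 */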

static void zswap_fill_page(void *ptr, unsigned long value)
{
        unsigned long *page;

        page = (unsigned long *)ptr;
        memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

bool zswap_store(struct folio *folio)
{
        swp_entry_t swp = folio->swap;
        int type = swp_type(swp);
        pgoff_t offset = swp_offset(swp);
        struct page *page = &folio->page;
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *dupentry;
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
        struct obj_cgroup *objcg = NULL;
        struct mem_cgroup *memcg = NULL;
        struct zswap_pool *pool;
        struct zpool *zpool;
        unsigned int dlen = PAGE_SIZE;
        unsigned long handle, value;
        char *buf;
        u8 *src, *dst;
        gfp_t gfp;
        int ret;

        VM_WARN_ON_ONCE(!folio_test_locked(folio));
        VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

        /* Large folios aren't supported */
        if (folio_test_large(folio))
                return false;

        if (!tree)
                return false;

        /*
         * If this is a duplicate, it must be removed before attempting to
         * store it; otherwise, if the store fails, the old page won't be
         * removed from the tree, and it might be written back, overwriting
         * the new data.
         */
        spin_lock(&tree->lock);
        dupentry = zswap_rb_search(&tree->rbroot, offset);
        if (dupentry) {
                zswap_duplicate_entry++;
                zswap_invalidate_entry(tree, dupentry);
        }
        spin_unlock(&tree->lock);

        if (!zswap_enabled)
                return false;

        objcg = get_obj_cgroup_from_folio(folio);
        if (objcg && !obj_cgroup_may_zswap(objcg)) {
                memcg = get_mem_cgroup_from_objcg(objcg);
                if (shrink_memcg(memcg)) {
                        mem_cgroup_put(memcg);
                        goto reject;
                }
                mem_cgroup_put(memcg);
        }

        /* reclaim space if needed */
        if (zswap_is_full()) {
                zswap_pool_limit_hit++;
                zswap_pool_reached_full = true;
                goto shrink;
        }

        if (zswap_pool_reached_full) {
                if (!zswap_can_accept())
                        goto shrink;
                else
                        zswap_pool_reached_full = false;
        }

        /* allocate entry */
        entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
        if (!entry) {
                zswap_reject_kmemcache_fail++;
                goto reject;
        }

        if (zswap_same_filled_pages_enabled) {
                src = kmap_local_page(page);
                if (zswap_is_page_same_filled(src, &value)) {
                        kunmap_local(src);
                        entry->swpentry = swp_entry(type, offset);
                        entry->length = 0;
                        entry->value = value;
                        atomic_inc(&zswap_same_filled_pages);
                        goto insert_entry;
                }
                kunmap_local(src);
        }

        if (!zswap_non_same_filled_pages_enabled)
                goto freepage;

        /* if entry is successfully added, it keeps the reference */
        entry->pool = zswap_pool_current_get();
        if (!entry->pool)
                goto freepage;

        if (objcg) {
                memcg = get_mem_cgroup_from_objcg(objcg);
                if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
                        mem_cgroup_put(memcg);
                        goto put_pool;
                }
                mem_cgroup_put(memcg);
        }

        /* compress */
        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

        mutex_lock(&acomp_ctx->mutex);

        dst = acomp_ctx->buffer;
        sg_init_table(&input, 1);
        sg_set_page(&input, &folio->page, PAGE_SIZE, 0);

        /*
         * We need PAGE_SIZE * 2 here since there may be cases of
         * over-compression, and hardware accelerators may not check the dst
         * buffer size, so give the dst buffer enough length to avoid a
         * buffer overflow.
         */
        sg_init_one(&output, dst, PAGE_SIZE * 2);
        acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
        /*
         * It may look a little silly that we send an asynchronous request and
         * then wait for its completion synchronously; this makes the process
         * synchronous in fact.
         * Theoretically, acomp supports sending multiple requests on one acomp
         * instance and having them completed simultaneously. But zswap actually
         * stores and loads page by page; there is no existing method for one
         * thread doing zswap to send the second page before the first page is
         * done. However, different threads running on different CPUs have
         * different acomp instances, so multiple threads can do (de)compression
         * in parallel.
         */
1629 ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1630 dlen = acomp_ctx->req->dlen;
1631
1632 if (ret) {
1633 zswap_reject_compress_fail++;
1634 goto put_dstmem;
1635 }
1636
1637 /* store */
1638 zpool = zswap_find_zpool(entry);
1639 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1640 if (zpool_malloc_support_movable(zpool))
1641 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1642 ret = zpool_malloc(zpool, dlen, gfp, &handle);
1643 if (ret == -ENOSPC) {
1644 zswap_reject_compress_poor++;
1645 goto put_dstmem;
1646 }
1647 if (ret) {
1648 zswap_reject_alloc_fail++;
1649 goto put_dstmem;
1650 }
1651 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1652 memcpy(buf, dst, dlen);
1653 zpool_unmap_handle(zpool, handle);
1654 mutex_unlock(&acomp_ctx->mutex);
1655
1656 /* populate entry */
1657 entry->swpentry = swp_entry(type, offset);
1658 entry->handle = handle;
1659 entry->length = dlen;
1660
1661insert_entry:
1662 entry->objcg = objcg;
1663 if (objcg) {
1664 obj_cgroup_charge_zswap(objcg, entry->length);
1665 /* Account before objcg ref is moved to tree */
1666 count_objcg_event(objcg, ZSWPOUT);
1667 }
1668
1669 /* map */
1670 spin_lock(&tree->lock);
1671 /*
1672 * A duplicate entry should have been removed at the beginning of this
1673 * function. Since the swap entry should be pinned, if a duplicate is
1674 * found again here it means that something went wrong in the swap
1675 * cache.
1676 */
1677 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1678 WARN_ON(1);
1679 zswap_duplicate_entry++;
1680 zswap_invalidate_entry(tree, dupentry);
1681 }
1682 if (entry->length) {
1683 INIT_LIST_HEAD(&entry->lru);
1684 zswap_lru_add(&entry->pool->list_lru, entry);
1685 atomic_inc(&entry->pool->nr_stored);
1686 }
1687 spin_unlock(&tree->lock);
1688
1689 /* update stats */
1690 atomic_inc(&zswap_stored_pages);
1691 zswap_update_total_size();
1692 count_vm_event(ZSWPOUT);
1693
1694 return true;
1695
1696put_dstmem:
1697 mutex_unlock(&acomp_ctx->mutex);
1698put_pool:
1699 zswap_pool_put(entry->pool);
1700freepage:
1701 zswap_entry_cache_free(entry);
1702reject:
1703 if (objcg)
1704 obj_cgroup_put(objcg);
1705 return false;
1706
1707shrink:
1708 pool = zswap_pool_last_get();
1709 if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1710 zswap_pool_put(pool);
1711 goto reject;
1712}
1713
1714bool zswap_load(struct folio *folio)
1715{
1716 swp_entry_t swp = folio->swap;
1717 int type = swp_type(swp);
1718 pgoff_t offset = swp_offset(swp);
1719 struct page *page = &folio->page;
1720 struct zswap_tree *tree = zswap_trees[type];
1721 struct zswap_entry *entry;
1722 u8 *dst;
1723
1724 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1725
1726 /* find */
1727 spin_lock(&tree->lock);
1728 entry = zswap_entry_find_get(&tree->rbroot, offset);
1729 if (!entry) {
1730 spin_unlock(&tree->lock);
1731 return false;
1732 }
1733 spin_unlock(&tree->lock);
1734
1735 if (entry->length)
1736 __zswap_load(entry, page);
1737 else {
1738 dst = kmap_local_page(page);
1739 zswap_fill_page(dst, entry->value);
1740 kunmap_local(dst);
1741 }
1742
1743 count_vm_event(ZSWPIN);
1744 if (entry->objcg)
1745 count_objcg_event(entry->objcg, ZSWPIN);
1746
1747 spin_lock(&tree->lock);
1748 if (zswap_exclusive_loads_enabled) {
1749 zswap_invalidate_entry(tree, entry);
1750 folio_mark_dirty(folio);
1751 } else if (entry->length) {
1752 zswap_lru_del(&entry->pool->list_lru, entry);
1753 zswap_lru_add(&entry->pool->list_lru, entry);
1754 }
1755 zswap_entry_put(tree, entry);
1756 spin_unlock(&tree->lock);
1757
1758 return true;
1759}
1760
1761void zswap_invalidate(int type, pgoff_t offset)
1762{
1763 struct zswap_tree *tree = zswap_trees[type];
1764 struct zswap_entry *entry;
1765
1766 /* find */
1767 spin_lock(&tree->lock);
1768 entry = zswap_rb_search(&tree->rbroot, offset);
1769 if (!entry) {
1770 /* entry was written back */
1771 spin_unlock(&tree->lock);
1772 return;
1773 }
1774 zswap_invalidate_entry(tree, entry);
1775 spin_unlock(&tree->lock);
1776}
1777
1778void zswap_swapon(int type)
1779{
1780 struct zswap_tree *tree;
1781
1782 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1783 if (!tree) {
1784 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1785 return;
1786 }
1787
1788 tree->rbroot = RB_ROOT;
1789 spin_lock_init(&tree->lock);
1790 zswap_trees[type] = tree;
1791}
1792
1793void zswap_swapoff(int type)
1794{
1795 struct zswap_tree *tree = zswap_trees[type];
1796 struct zswap_entry *entry, *n;
1797
1798 if (!tree)
1799 return;
1800
1801 /* walk the tree and free everything */
1802 spin_lock(&tree->lock);
1803 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1804 zswap_free_entry(entry);
1805 tree->rbroot = RB_ROOT;
1806 spin_unlock(&tree->lock);
1807 kfree(tree);
1808 zswap_trees[type] = NULL;
1809}
1810
1811/*********************************
1812* debugfs functions
1813**********************************/
1814#ifdef CONFIG_DEBUG_FS
1815#include <linux/debugfs.h>
1816
1817static struct dentry *zswap_debugfs_root;
1818
1819static int zswap_debugfs_init(void)
1820{
1821 if (!debugfs_initialized())
1822 return -ENODEV;
1823
1824 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1825
1826 debugfs_create_u64("pool_limit_hit", 0444,
1827 zswap_debugfs_root, &zswap_pool_limit_hit);
1828 debugfs_create_u64("reject_reclaim_fail", 0444,
1829 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1830 debugfs_create_u64("reject_alloc_fail", 0444,
1831 zswap_debugfs_root, &zswap_reject_alloc_fail);
1832 debugfs_create_u64("reject_kmemcache_fail", 0444,
1833 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1834 debugfs_create_u64("reject_compress_fail", 0444,
1835 zswap_debugfs_root, &zswap_reject_compress_fail);
1836 debugfs_create_u64("reject_compress_poor", 0444,
1837 zswap_debugfs_root, &zswap_reject_compress_poor);
1838 debugfs_create_u64("written_back_pages", 0444,
1839 zswap_debugfs_root, &zswap_written_back_pages);
1840 debugfs_create_u64("duplicate_entry", 0444,
1841 zswap_debugfs_root, &zswap_duplicate_entry);
1842 debugfs_create_u64("pool_total_size", 0444,
1843 zswap_debugfs_root, &zswap_pool_total_size);
1844 debugfs_create_atomic_t("stored_pages", 0444,
1845 zswap_debugfs_root, &zswap_stored_pages);
1846 debugfs_create_atomic_t("same_filled_pages", 0444,
1847 zswap_debugfs_root, &zswap_same_filled_pages);
1848
1849 return 0;
1850}
1851#else
1852static int zswap_debugfs_init(void)
1853{
1854 return 0;
1855}
1856#endif
1857
1858/*********************************
1859* module init and exit
1860**********************************/
1861static int zswap_setup(void)
1862{
1863 struct zswap_pool *pool;
1864 int ret;
1865
1866 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1867 if (!zswap_entry_cache) {
1868 pr_err("entry cache creation failed\n");
1869 goto cache_fail;
1870 }
1871
1872 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1873 "mm/zswap_pool:prepare",
1874 zswap_cpu_comp_prepare,
1875 zswap_cpu_comp_dead);
1876 if (ret)
1877 goto hp_fail;
1878
1879 pool = __zswap_pool_create_fallback();
1880 if (pool) {
1881 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1882 zpool_get_type(pool->zpools[0]));
1883 list_add(&pool->list, &zswap_pools);
1884 zswap_has_pool = true;
1885 } else {
1886 pr_err("pool creation failed\n");
1887 zswap_enabled = false;
1888 }
1889
1890 shrink_wq = create_workqueue("zswap-shrink");
1891 if (!shrink_wq)
1892 goto fallback_fail;
1893
1894 if (zswap_debugfs_init())
1895 pr_warn("debugfs initialization failed\n");
1896 zswap_init_state = ZSWAP_INIT_SUCCEED;
1897 return 0;
1898
fallback_fail:
	if (pool)
		zswap_pool_destroy(pool);
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
1904cache_fail:
1905 /* if built-in, we aren't unloaded on failure; don't allow use */
1906 zswap_init_state = ZSWAP_INIT_FAILED;
1907 zswap_enabled = false;
1908 return -ENOMEM;
1909}
1910
1911static int __init zswap_init(void)
1912{
1913 if (!zswap_enabled)
1914 return 0;
1915 return zswap_setup();
1916}
1917/* must be late so crypto has time to come up */
1918late_initcall(zswap_init);
1919
1920MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1921MODULE_DESCRIPTION("Compressed cache for swap pages");
42/*********************************
43* statistics
44**********************************/
45/* The number of compressed pages currently stored in zswap */
atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
47
48/*
49 * The statistics below are not protected from concurrent access for
 * performance reasons, so they may not be 100% accurate. However,
51 * they do provide useful information on roughly how many times a
52 * certain event is occurring.
53*/
54
55/* Pool limit was hit (see zswap_max_pool_percent) */
56static u64 zswap_pool_limit_hit;
57/* Pages written back when pool limit was reached */
58static u64 zswap_written_back_pages;
59/* Store failed due to a reclaim failure after pool limit was reached */
60static u64 zswap_reject_reclaim_fail;
61/* Store failed due to compression algorithm failure */
62static u64 zswap_reject_compress_fail;
63/* Compressed page was too big for the allocator to (optimally) store */
64static u64 zswap_reject_compress_poor;
65/* Store failed because underlying allocator could not get memory */
66static u64 zswap_reject_alloc_fail;
67/* Store failed because the entry metadata could not be allocated (rare) */
68static u64 zswap_reject_kmemcache_fail;
69
70/* Shrinker work queue */
71static struct workqueue_struct *shrink_wq;
72/* Pool limit was hit, we need to calm down */
73static bool zswap_pool_reached_full;
74
75/*********************************
76* tunables
77**********************************/
78
79#define ZSWAP_PARAM_UNSET ""
80
81static int zswap_setup(void);
82
83/* Enable/disable zswap */
84static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
85static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
86static int zswap_enabled_param_set(const char *,
87 const struct kernel_param *);
88static const struct kernel_param_ops zswap_enabled_param_ops = {
89 .set = zswap_enabled_param_set,
90 .get = param_get_bool,
91};
92module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
93
94/* Crypto compressor to use */
95static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
96static int zswap_compressor_param_set(const char *,
97 const struct kernel_param *);
98static const struct kernel_param_ops zswap_compressor_param_ops = {
99 .set = zswap_compressor_param_set,
100 .get = param_get_charp,
101 .free = param_free_charp,
102};
103module_param_cb(compressor, &zswap_compressor_param_ops,
104 &zswap_compressor, 0644);
105
106/* Compressed storage zpool to use */
107static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
108static int zswap_zpool_param_set(const char *, const struct kernel_param *);
109static const struct kernel_param_ops zswap_zpool_param_ops = {
110 .set = zswap_zpool_param_set,
111 .get = param_get_charp,
112 .free = param_free_charp,
113};
114module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
115
116/* The maximum percentage of memory that the compressed pool can occupy */
117static unsigned int zswap_max_pool_percent = 20;
118module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
119
120/* The threshold for accepting new pages after the max_pool_percent was hit */
121static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
122module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
123 uint, 0644);
124
125/* Enable/disable memory pressure-based shrinker. */
126static bool zswap_shrinker_enabled = IS_ENABLED(
127 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
128module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
129
130bool zswap_is_enabled(void)
131{
132 return zswap_enabled;
133}
134
135bool zswap_never_enabled(void)
136{
137 return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
138}
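
/*
 * Descriptive note (not from the original source): zswap_never_enabled()
 * serves hot swapin paths. As long as zswap has never been switched on,
 * the static branch lets callers skip zswap lookups at near-zero cost;
 * once a pool is created the branch is flipped permanently, even if
 * zswap is later disabled again.
 */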
139
140/*********************************
141* data structures
142**********************************/
143
144struct crypto_acomp_ctx {
145 struct crypto_acomp *acomp;
146 struct acomp_req *req;
147 struct crypto_wait wait;
148 u8 *buffer;
149 struct mutex mutex;
150 bool is_sleepable;
151};
152
/*
 * When an entry is taken off the LRU for writeback, it may have been
 * invalidated concurrently; it must be verified to still be valid in
 * its tree before being dereferenced. See the comments in
 * shrink_memcg_cb().
 */
159struct zswap_pool {
160 struct zpool *zpool;
161 struct crypto_acomp_ctx __percpu *acomp_ctx;
162 struct percpu_ref ref;
163 struct list_head list;
164 struct work_struct release_work;
165 struct hlist_node node;
166 char tfm_name[CRYPTO_MAX_ALG_NAME];
167};
168
169/* Global LRU lists shared by all zswap pools. */
170static struct list_lru zswap_list_lru;
171
172/* The lock protects zswap_next_shrink updates. */
173static DEFINE_SPINLOCK(zswap_shrink_lock);
174static struct mem_cgroup *zswap_next_shrink;
175static struct work_struct zswap_shrink_work;
176static struct shrinker *zswap_shrinker;
177
178/*
179 * struct zswap_entry
180 *
181 * This structure contains the metadata for tracking a single compressed
182 * page within zswap.
183 *
 * swpentry - associated swap entry, the offset indexes into the xarray
185 * length - the length in bytes of the compressed page data. Needed during
186 * decompression.
187 * referenced - true if the entry recently entered the zswap pool. Unset by the
188 * writeback logic. The entry is only reclaimed by the writeback
189 * logic if referenced is unset. See comments in the shrinker
190 * section for context.
191 * pool - the zswap_pool the entry's data is in
192 * handle - zpool allocation handle that stores the compressed page data
193 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the global zswap LRU used to evict entries.
195 */
196struct zswap_entry {
197 swp_entry_t swpentry;
198 unsigned int length;
199 bool referenced;
200 struct zswap_pool *pool;
201 unsigned long handle;
202 struct obj_cgroup *objcg;
203 struct list_head lru;
204};
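
/*
 * Rough metadata-overhead estimate (illustrative, not from the original
 * source): on a typical 64-bit build the fields above pack into about
 * 56 bytes, i.e. on the order of 1.4% of a 4 KiB page per stored page,
 * in addition to the compressed payload itself.
 */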
205
206static struct xarray *zswap_trees[MAX_SWAPFILES];
207static unsigned int nr_zswap_trees[MAX_SWAPFILES];
208
209/* RCU-protected iteration */
210static LIST_HEAD(zswap_pools);
211/* protects zswap_pools list modification */
212static DEFINE_SPINLOCK(zswap_pools_lock);
213/* pool counter to provide unique names to zpool */
214static atomic_t zswap_pools_count = ATOMIC_INIT(0);
215
216enum zswap_init_type {
217 ZSWAP_UNINIT,
218 ZSWAP_INIT_SUCCEED,
219 ZSWAP_INIT_FAILED
220};
221
222static enum zswap_init_type zswap_init_state;
223
224/* used to ensure the integrity of initialization */
225static DEFINE_MUTEX(zswap_init_lock);
226
227/* init completed, but couldn't create the initial pool */
228static bool zswap_has_pool;
229
230/*********************************
231* helpers and fwd declarations
232**********************************/
233
234static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
235{
236 return &zswap_trees[swp_type(swp)][swp_offset(swp)
237 >> SWAP_ADDRESS_SPACE_SHIFT];
238}
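
/*
 * Illustration (not from the original source): with the usual
 * SWAP_ADDRESS_SPACE_SHIFT of 14 and 4 KiB pages, each xarray above
 * covers 2^14 swap slots, i.e. 64 MiB of swap space. A 1 GiB swap
 * device is thus sharded into 16 independent trees, spreading xarray
 * contention across concurrent stores, loads and invalidations.
 */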
239
240#define zswap_pool_debug(msg, p) \
241 pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
242 zpool_get_type((p)->zpool))
243
244/*********************************
245* pool functions
246**********************************/
247static void __zswap_pool_empty(struct percpu_ref *ref);
248
249static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
250{
251 struct zswap_pool *pool;
252 char name[38]; /* 'zswap' + 32 char (max) num + \0 */
253 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
254 int ret, cpu;
255
256 if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
261 if (!strcmp(type, ZSWAP_PARAM_UNSET))
262 return NULL;
263 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
264 return NULL;
265 }
266
267 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
268 if (!pool)
269 return NULL;
270
271 /* unique name for each pool specifically required by zsmalloc */
272 snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
273 pool->zpool = zpool_create_pool(type, name, gfp);
274 if (!pool->zpool) {
275 pr_err("%s zpool not available\n", type);
276 goto error;
277 }
278 pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
279
280 strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
281
282 pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
283 if (!pool->acomp_ctx) {
284 pr_err("percpu alloc failed\n");
285 goto error;
286 }
287
288 for_each_possible_cpu(cpu)
289 mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
290
291 ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
292 &pool->node);
293 if (ret)
294 goto error;
295
296 /* being the current pool takes 1 ref; this func expects the
297 * caller to always add the new pool as the current pool
298 */
299 ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
300 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
301 if (ret)
302 goto ref_fail;
303 INIT_LIST_HEAD(&pool->list);
304
305 zswap_pool_debug("created", pool);
306
307 return pool;
308
309ref_fail:
310 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
311error:
312 if (pool->acomp_ctx)
313 free_percpu(pool->acomp_ctx);
314 if (pool->zpool)
315 zpool_destroy_pool(pool->zpool);
316 kfree(pool);
317 return NULL;
318}
319
320static struct zswap_pool *__zswap_pool_create_fallback(void)
321{
322 bool has_comp, has_zpool;
323
324 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
325 if (!has_comp && strcmp(zswap_compressor,
326 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
327 pr_err("compressor %s not available, using default %s\n",
328 zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
329 param_free_charp(&zswap_compressor);
330 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
331 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
332 }
333 if (!has_comp) {
334 pr_err("default compressor %s not available\n",
335 zswap_compressor);
336 param_free_charp(&zswap_compressor);
337 zswap_compressor = ZSWAP_PARAM_UNSET;
338 }
339
340 has_zpool = zpool_has_pool(zswap_zpool_type);
341 if (!has_zpool && strcmp(zswap_zpool_type,
342 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
343 pr_err("zpool %s not available, using default %s\n",
344 zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
345 param_free_charp(&zswap_zpool_type);
346 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
347 has_zpool = zpool_has_pool(zswap_zpool_type);
348 }
349 if (!has_zpool) {
350 pr_err("default zpool %s not available\n",
351 zswap_zpool_type);
352 param_free_charp(&zswap_zpool_type);
353 zswap_zpool_type = ZSWAP_PARAM_UNSET;
354 }
355
356 if (!has_comp || !has_zpool)
357 return NULL;
358
359 return zswap_pool_create(zswap_zpool_type, zswap_compressor);
360}
361
362static void zswap_pool_destroy(struct zswap_pool *pool)
363{
364 zswap_pool_debug("destroying", pool);
365
366 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
367 free_percpu(pool->acomp_ctx);
368
369 zpool_destroy_pool(pool->zpool);
370 kfree(pool);
371}
372
373static void __zswap_pool_release(struct work_struct *work)
374{
375 struct zswap_pool *pool = container_of(work, typeof(*pool),
376 release_work);
377
378 synchronize_rcu();
379
380 /* nobody should have been able to get a ref... */
381 WARN_ON(!percpu_ref_is_zero(&pool->ref));
382 percpu_ref_exit(&pool->ref);
383
384 /* pool is now off zswap_pools list and has no references. */
385 zswap_pool_destroy(pool);
386}
387
388static struct zswap_pool *zswap_pool_current(void);
389
390static void __zswap_pool_empty(struct percpu_ref *ref)
391{
392 struct zswap_pool *pool;
393
394 pool = container_of(ref, typeof(*pool), ref);
395
396 spin_lock_bh(&zswap_pools_lock);
397
398 WARN_ON(pool == zswap_pool_current());
399
400 list_del_rcu(&pool->list);
401
402 INIT_WORK(&pool->release_work, __zswap_pool_release);
403 schedule_work(&pool->release_work);
404
405 spin_unlock_bh(&zswap_pools_lock);
406}
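
/*
 * Pool lifecycle sketch (descriptive note, not from the original
 * source): percpu_ref_kill() is called when a pool stops being the
 * current pool (see __zswap_param_set()). Once the last stored entry
 * drops its reference, __zswap_pool_empty() unlinks the pool and
 * defers teardown to __zswap_pool_release(), which waits out an RCU
 * grace period so concurrent zswap_pool_current_get() callers are done
 * with the pointer before the pool is destroyed.
 */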
407
408static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
409{
410 if (!pool)
411 return 0;
412
413 return percpu_ref_tryget(&pool->ref);
414}
415
416/* The caller must already have a reference. */
417static void zswap_pool_get(struct zswap_pool *pool)
418{
419 percpu_ref_get(&pool->ref);
420}
421
422static void zswap_pool_put(struct zswap_pool *pool)
423{
424 percpu_ref_put(&pool->ref);
425}
426
427static struct zswap_pool *__zswap_pool_current(void)
428{
429 struct zswap_pool *pool;
430
431 pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
432 WARN_ONCE(!pool && zswap_has_pool,
433 "%s: no page storage pool!\n", __func__);
434
435 return pool;
436}
437
438static struct zswap_pool *zswap_pool_current(void)
439{
440 assert_spin_locked(&zswap_pools_lock);
441
442 return __zswap_pool_current();
443}
444
445static struct zswap_pool *zswap_pool_current_get(void)
446{
447 struct zswap_pool *pool;
448
449 rcu_read_lock();
450
451 pool = __zswap_pool_current();
452 if (!zswap_pool_tryget(pool))
453 pool = NULL;
454
455 rcu_read_unlock();
456
457 return pool;
458}
459
460/* type and compressor must be null-terminated */
461static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
462{
463 struct zswap_pool *pool;
464
465 assert_spin_locked(&zswap_pools_lock);
466
467 list_for_each_entry_rcu(pool, &zswap_pools, list) {
468 if (strcmp(pool->tfm_name, compressor))
469 continue;
470 if (strcmp(zpool_get_type(pool->zpool), type))
471 continue;
472 /* if we can't get it, it's about to be destroyed */
473 if (!zswap_pool_tryget(pool))
474 continue;
475 return pool;
476 }
477
478 return NULL;
479}
480
481static unsigned long zswap_max_pages(void)
482{
483 return totalram_pages() * zswap_max_pool_percent / 100;
484}
485
486static unsigned long zswap_accept_thr_pages(void)
487{
488 return zswap_max_pages() * zswap_accept_thr_percent / 100;
489}
490
491unsigned long zswap_total_pages(void)
492{
493 struct zswap_pool *pool;
494 unsigned long total = 0;
495
496 rcu_read_lock();
497 list_for_each_entry_rcu(pool, &zswap_pools, list)
498 total += zpool_get_total_pages(pool->zpool);
499 rcu_read_unlock();
500
501 return total;
502}
503
504static bool zswap_check_limits(void)
505{
506 unsigned long cur_pages = zswap_total_pages();
507 unsigned long max_pages = zswap_max_pages();
508
509 if (cur_pages >= max_pages) {
510 zswap_pool_limit_hit++;
511 zswap_pool_reached_full = true;
512 } else if (zswap_pool_reached_full &&
513 cur_pages <= zswap_accept_thr_pages()) {
514 zswap_pool_reached_full = false;
515 }
516 return zswap_pool_reached_full;
517}
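
/*
 * Worked example of the hysteresis above (illustrative numbers): on a
 * 16 GiB machine with the defaults max_pool_percent = 20 and
 * accept_thr_percent = 90, the pool limit is ~3.2 GiB and the accept
 * threshold ~2.88 GiB. Once the limit is hit, new stores are rejected
 * until usage drops back below 2.88 GiB, which keeps the pool from
 * bouncing right at the limit.
 */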
518
519/*********************************
520* param callbacks
521**********************************/
522
523static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
524{
525 /* no change required */
526 if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
527 return false;
528 return true;
529}
530
531/* val must be a null-terminated string */
532static int __zswap_param_set(const char *val, const struct kernel_param *kp,
533 char *type, char *compressor)
534{
535 struct zswap_pool *pool, *put_pool = NULL;
536 char *s = strstrip((char *)val);
537 int ret = 0;
538 bool new_pool = false;
539
540 mutex_lock(&zswap_init_lock);
541 switch (zswap_init_state) {
542 case ZSWAP_UNINIT:
543 /* if this is load-time (pre-init) param setting,
544 * don't create a pool; that's done during init.
545 */
546 ret = param_set_charp(s, kp);
547 break;
548 case ZSWAP_INIT_SUCCEED:
549 new_pool = zswap_pool_changed(s, kp);
550 break;
551 case ZSWAP_INIT_FAILED:
552 pr_err("can't set param, initialization failed\n");
553 ret = -ENODEV;
554 }
555 mutex_unlock(&zswap_init_lock);
556
557 /* no need to create a new pool, return directly */
558 if (!new_pool)
559 return ret;
560
561 if (!type) {
562 if (!zpool_has_pool(s)) {
563 pr_err("zpool %s not available\n", s);
564 return -ENOENT;
565 }
566 type = s;
567 } else if (!compressor) {
568 if (!crypto_has_acomp(s, 0, 0)) {
569 pr_err("compressor %s not available\n", s);
570 return -ENOENT;
571 }
572 compressor = s;
573 } else {
574 WARN_ON(1);
575 return -EINVAL;
576 }
577
578 spin_lock_bh(&zswap_pools_lock);
579
580 pool = zswap_pool_find_get(type, compressor);
581 if (pool) {
582 zswap_pool_debug("using existing", pool);
583 WARN_ON(pool == zswap_pool_current());
584 list_del_rcu(&pool->list);
585 }
586
587 spin_unlock_bh(&zswap_pools_lock);
588
589 if (!pool)
590 pool = zswap_pool_create(type, compressor);
591 else {
592 /*
593 * Restore the initial ref dropped by percpu_ref_kill()
594 * when the pool was decommissioned and switch it again
595 * to percpu mode.
596 */
597 percpu_ref_resurrect(&pool->ref);
598
599 /* Drop the ref from zswap_pool_find_get(). */
600 zswap_pool_put(pool);
601 }
602
603 if (pool)
604 ret = param_set_charp(s, kp);
605 else
606 ret = -EINVAL;
607
608 spin_lock_bh(&zswap_pools_lock);
609
610 if (!ret) {
611 put_pool = zswap_pool_current();
612 list_add_rcu(&pool->list, &zswap_pools);
613 zswap_has_pool = true;
614 } else if (pool) {
615 /* add the possibly pre-existing pool to the end of the pools
616 * list; if it's new (and empty) then it'll be removed and
617 * destroyed by the put after we drop the lock
618 */
619 list_add_tail_rcu(&pool->list, &zswap_pools);
620 put_pool = pool;
621 }
622
623 spin_unlock_bh(&zswap_pools_lock);
624
625 if (!zswap_has_pool && !pool) {
626 /* if initial pool creation failed, and this pool creation also
627 * failed, maybe both compressor and zpool params were bad.
628 * Allow changing this param, so pool creation will succeed
629 * when the other param is changed. We already verified this
630 * param is ok in the zpool_has_pool() or crypto_has_acomp()
631 * checks above.
632 */
633 ret = param_set_charp(s, kp);
634 }
635
636 /* drop the ref from either the old current pool,
637 * or the new pool we failed to add
638 */
639 if (put_pool)
640 percpu_ref_kill(&put_pool->ref);
641
642 return ret;
643}
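
/*
 * The wrappers below make the compressor and zpool switchable at
 * runtime. For example (assuming zstd support is available):
 *
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *
 * creates (or resurrects) a zstd pool, makes it the current pool, and
 * kills the ref of the previous one so it is destroyed once it drains.
 */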
644
645static int zswap_compressor_param_set(const char *val,
646 const struct kernel_param *kp)
647{
648 return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
649}
650
651static int zswap_zpool_param_set(const char *val,
652 const struct kernel_param *kp)
653{
654 return __zswap_param_set(val, kp, NULL, zswap_compressor);
655}
656
657static int zswap_enabled_param_set(const char *val,
658 const struct kernel_param *kp)
659{
660 int ret = -ENODEV;
661
662 /* if this is load-time (pre-init) param setting, only set param. */
663 if (system_state != SYSTEM_RUNNING)
664 return param_set_bool(val, kp);
665
666 mutex_lock(&zswap_init_lock);
667 switch (zswap_init_state) {
668 case ZSWAP_UNINIT:
669 if (zswap_setup())
670 break;
671 fallthrough;
672 case ZSWAP_INIT_SUCCEED:
673 if (!zswap_has_pool)
674 pr_err("can't enable, no pool configured\n");
675 else
676 ret = param_set_bool(val, kp);
677 break;
678 case ZSWAP_INIT_FAILED:
679 pr_err("can't enable, initialization failed\n");
680 }
681 mutex_unlock(&zswap_init_lock);
682
683 return ret;
684}
685
686/*********************************
687* lru functions
688**********************************/
689
690/* should be called under RCU */
691#ifdef CONFIG_MEMCG
692static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
693{
694 return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
695}
696#else
697static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
698{
699 return NULL;
700}
701#endif
702
703static inline int entry_to_nid(struct zswap_entry *entry)
704{
705 return page_to_nid(virt_to_page(entry));
706}
707
708static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
709{
710 int nid = entry_to_nid(entry);
711 struct mem_cgroup *memcg;
712
713 /*
714 * Note that it is safe to use rcu_read_lock() here, even in the face of
715 * concurrent memcg offlining:
716 *
717 * 1. list_lru_add() is called before list_lru_one is dead. The
718 * new entry will be reparented to memcg's parent's list_lru.
719 * 2. list_lru_add() is called after list_lru_one is dead. The
720 * new entry will be added directly to memcg's parent's list_lru.
721 *
722 * Similar reasoning holds for list_lru_del().
723 */
724 rcu_read_lock();
725 memcg = mem_cgroup_from_entry(entry);
726 /* will always succeed */
727 list_lru_add(list_lru, &entry->lru, nid, memcg);
728 rcu_read_unlock();
729}
730
731static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
732{
733 int nid = entry_to_nid(entry);
734 struct mem_cgroup *memcg;
735
736 rcu_read_lock();
737 memcg = mem_cgroup_from_entry(entry);
738 /* will always succeed */
739 list_lru_del(list_lru, &entry->lru, nid, memcg);
740 rcu_read_unlock();
741}
742
743void zswap_lruvec_state_init(struct lruvec *lruvec)
744{
745 atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
746}
747
748void zswap_folio_swapin(struct folio *folio)
749{
750 struct lruvec *lruvec;
751
752 if (folio) {
753 lruvec = folio_lruvec(folio);
754 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
755 }
756}
757
758/*
759 * This function should be called when a memcg is being offlined.
760 *
761 * Since the global shrinker shrink_worker() may hold a reference
762 * of the memcg, we must check and release the reference in
763 * zswap_next_shrink.
764 *
765 * shrink_worker() must handle the case where this function releases
766 * the reference of memcg being shrunk.
767 */
768void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
769{
770 /* lock out zswap shrinker walking memcg tree */
771 spin_lock(&zswap_shrink_lock);
772 if (zswap_next_shrink == memcg) {
773 do {
774 zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
775 } while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
776 }
777 spin_unlock(&zswap_shrink_lock);
778}
779
780/*********************************
781* zswap entry functions
782**********************************/
783static struct kmem_cache *zswap_entry_cache;
784
785static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
786{
787 struct zswap_entry *entry;
788 entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
789 if (!entry)
790 return NULL;
791 return entry;
792}
793
794static void zswap_entry_cache_free(struct zswap_entry *entry)
795{
796 kmem_cache_free(zswap_entry_cache, entry);
797}
798
799/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
801 * freeing the entry itself, and decrementing the number of stored pages.
802 */
803static void zswap_entry_free(struct zswap_entry *entry)
804{
805 zswap_lru_del(&zswap_list_lru, entry);
806 zpool_free(entry->pool->zpool, entry->handle);
807 zswap_pool_put(entry->pool);
808 if (entry->objcg) {
809 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
810 obj_cgroup_put(entry->objcg);
811 }
812 zswap_entry_cache_free(entry);
813 atomic_long_dec(&zswap_stored_pages);
814}
815
816/*********************************
817* compressed storage functions
818**********************************/
819static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
820{
821 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
822 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
823 struct crypto_acomp *acomp = NULL;
824 struct acomp_req *req = NULL;
825 u8 *buffer = NULL;
826 int ret;
827
828 buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
829 if (!buffer) {
830 ret = -ENOMEM;
831 goto fail;
832 }
833
834 acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
835 if (IS_ERR(acomp)) {
836 pr_err("could not alloc crypto acomp %s : %ld\n",
837 pool->tfm_name, PTR_ERR(acomp));
838 ret = PTR_ERR(acomp);
839 goto fail;
840 }
841
842 req = acomp_request_alloc(acomp);
843 if (!req) {
844 pr_err("could not alloc crypto acomp_request %s\n",
845 pool->tfm_name);
846 ret = -ENOMEM;
847 goto fail;
848 }
849
850 /*
851 * Only hold the mutex after completing allocations, otherwise we may
852 * recurse into zswap through reclaim and attempt to hold the mutex
853 * again resulting in a deadlock.
854 */
855 mutex_lock(&acomp_ctx->mutex);
856 crypto_init_wait(&acomp_ctx->wait);
857
858 /*
859 * if the backend of acomp is async zip, crypto_req_done() will wakeup
860 * crypto_wait_req(); if the backend of acomp is scomp, the callback
861 * won't be called, crypto_wait_req() will return without blocking.
862 */
863 acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
864 crypto_req_done, &acomp_ctx->wait);
865
866 acomp_ctx->buffer = buffer;
867 acomp_ctx->acomp = acomp;
868 acomp_ctx->is_sleepable = acomp_is_async(acomp);
869 acomp_ctx->req = req;
870 mutex_unlock(&acomp_ctx->mutex);
871 return 0;
872
873fail:
874 if (acomp)
875 crypto_free_acomp(acomp);
876 kfree(buffer);
877 return ret;
878}
879
880static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
881{
882 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
883 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
884
885 mutex_lock(&acomp_ctx->mutex);
886 if (!IS_ERR_OR_NULL(acomp_ctx)) {
887 if (!IS_ERR_OR_NULL(acomp_ctx->req))
888 acomp_request_free(acomp_ctx->req);
889 acomp_ctx->req = NULL;
890 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
891 crypto_free_acomp(acomp_ctx->acomp);
892 kfree(acomp_ctx->buffer);
893 }
894 mutex_unlock(&acomp_ctx->mutex);
895
896 return 0;
897}
898
899static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
900{
901 struct crypto_acomp_ctx *acomp_ctx;
902
903 for (;;) {
904 acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
905 mutex_lock(&acomp_ctx->mutex);
906 if (likely(acomp_ctx->req))
907 return acomp_ctx;
908 /*
909 * It is possible that we were migrated to a different CPU after
910 * getting the per-CPU ctx but before the mutex was acquired. If
911 * the old CPU got offlined, zswap_cpu_comp_dead() could have
912 * already freed ctx->req (among other things) and set it to
913 * NULL. Just try again on the new CPU that we ended up on.
914 */
915 mutex_unlock(&acomp_ctx->mutex);
916 }
917}
918
919static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
920{
921 mutex_unlock(&acomp_ctx->mutex);
922}
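
/*
 * Sketch of the expected calling pattern (for illustration only):
 *
 *	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
 *	... use acomp_ctx->req and acomp_ctx->buffer ...
 *	acomp_ctx_put_unlock(acomp_ctx);
 *
 * Holding the mutex in between also pins the per-CPU resources against
 * the hotplug teardown in zswap_cpu_comp_dead().
 */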
923
924static bool zswap_compress(struct page *page, struct zswap_entry *entry,
925 struct zswap_pool *pool)
926{
927 struct crypto_acomp_ctx *acomp_ctx;
928 struct scatterlist input, output;
929 int comp_ret = 0, alloc_ret = 0;
930 unsigned int dlen = PAGE_SIZE;
931 unsigned long handle;
932 struct zpool *zpool;
933 char *buf;
934 gfp_t gfp;
935 u8 *dst;
936
937 acomp_ctx = acomp_ctx_get_cpu_lock(pool);
938 dst = acomp_ctx->buffer;
939 sg_init_table(&input, 1);
940 sg_set_page(&input, page, PAGE_SIZE, 0);
941
942 /*
	 * We need PAGE_SIZE * 2 here since the compressed output may exceed
	 * the input in the worst case, and hardware accelerators might not
	 * check the dst buffer size, so give the dst buffer enough length
	 * to avoid buffer overflow.
946 */
947 sg_init_one(&output, dst, PAGE_SIZE * 2);
948 acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
949
950 /*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously, which makes the process
	 * effectively synchronous.
	 * Theoretically, acomp lets users submit multiple requests on one
	 * acomp instance and have them completed simultaneously; but zswap
	 * stores and loads page by page, and within a single thread there is
	 * no way to send a second page before the first one is done.
	 * Different threads running on different CPUs do use different acomp
	 * instances, however, so multiple threads can still (de)compress in
	 * parallel.
961 */
962 comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
963 dlen = acomp_ctx->req->dlen;
964 if (comp_ret)
965 goto unlock;
966
967 zpool = pool->zpool;
968 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
969 if (zpool_malloc_support_movable(zpool))
970 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
971 alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
972 if (alloc_ret)
973 goto unlock;
974
975 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
976 memcpy(buf, dst, dlen);
977 zpool_unmap_handle(zpool, handle);
978
979 entry->handle = handle;
980 entry->length = dlen;
981
982unlock:
983 if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
984 zswap_reject_compress_poor++;
985 else if (comp_ret)
986 zswap_reject_compress_fail++;
987 else if (alloc_ret)
988 zswap_reject_alloc_fail++;
989
990 acomp_ctx_put_unlock(acomp_ctx);
991 return comp_ret == 0 && alloc_ret == 0;
992}
993
994static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
995{
996 struct zpool *zpool = entry->pool->zpool;
997 struct scatterlist input, output;
998 struct crypto_acomp_ctx *acomp_ctx;
999 u8 *src;
1000
1001 acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
1002 src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1003 /*
1004 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
1005 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
1006 * resort to copying the buffer to a temporary one.
1007 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
1009 * However, sg_init_one is only equipped to handle linearly mapped low memory.
1010 * In such cases, we also must copy the buffer to a temporary and lowmem one.
1011 */
1012 if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
1013 !virt_addr_valid(src)) {
1014 memcpy(acomp_ctx->buffer, src, entry->length);
1015 src = acomp_ctx->buffer;
1016 zpool_unmap_handle(zpool, entry->handle);
1017 }
1018
1019 sg_init_one(&input, src, entry->length);
1020 sg_init_table(&output, 1);
1021 sg_set_folio(&output, folio, PAGE_SIZE, 0);
1022 acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1023 BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1024 BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1025
1026 if (src != acomp_ctx->buffer)
1027 zpool_unmap_handle(zpool, entry->handle);
1028 acomp_ctx_put_unlock(acomp_ctx);
1029}
1030
1031/*********************************
1032* writeback code
1033**********************************/
1034/*
1035 * Attempts to free an entry by adding a folio to the swap cache,
1036 * decompressing the entry data into the folio, and issuing a
1037 * bio write to write the folio back to the swap device.
1038 *
1039 * This can be thought of as a "resumed writeback" of the folio
1040 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted by zswap_store()
1042 * in the first place. After the folio has been decompressed into
1043 * the swap cache, the compressed version stored by zswap can be
1044 * freed.
1045 */
1046static int zswap_writeback_entry(struct zswap_entry *entry,
1047 swp_entry_t swpentry)
1048{
1049 struct xarray *tree;
1050 pgoff_t offset = swp_offset(swpentry);
1051 struct folio *folio;
1052 struct mempolicy *mpol;
1053 bool folio_was_allocated;
1054 struct writeback_control wbc = {
1055 .sync_mode = WB_SYNC_NONE,
1056 };
1057
1058 /* try to allocate swap cache folio */
1059 mpol = get_task_policy(current);
1060 folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1061 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1062 if (!folio)
1063 return -ENOMEM;
1064
1065 /*
1066 * Found an existing folio, we raced with swapin or concurrent
1067 * shrinker. We generally writeback cold folios from zswap, and
1068 * swapin means the folio just became hot, so skip this folio.
1069 * For unlikely concurrent shrinker case, it will be unlinked
1070 * and freed when invalidated by the concurrent shrinker anyway.
1071 */
1072 if (!folio_was_allocated) {
1073 folio_put(folio);
1074 return -EEXIST;
1075 }
1076
1077 /*
1078 * folio is locked, and the swapcache is now secured against
1079 * concurrent swapping to and from the slot, and concurrent
1080 * swapoff so we can safely dereference the zswap tree here.
1081 * Verify that the swap entry hasn't been invalidated and recycled
1082 * behind our backs, to avoid overwriting a new swap folio with
1083 * old compressed data. Only when this is successful can the entry
1084 * be dereferenced.
1085 */
1086 tree = swap_zswap_tree(swpentry);
1087 if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
1088 delete_from_swap_cache(folio);
1089 folio_unlock(folio);
1090 folio_put(folio);
1091 return -ENOMEM;
1092 }
1093
1094 zswap_decompress(entry, folio);
1095
1096 count_vm_event(ZSWPWB);
1097 if (entry->objcg)
1098 count_objcg_events(entry->objcg, ZSWPWB, 1);
1099
1100 zswap_entry_free(entry);
1101
1102 /* folio is up to date */
1103 folio_mark_uptodate(folio);
1104
1105 /* move it to the tail of the inactive list after end_writeback */
1106 folio_set_reclaim(folio);
1107
1108 /* start writeback */
1109 __swap_writepage(folio, &wbc);
1110 folio_put(folio);
1111
1112 return 0;
1113}
1114
1115/*********************************
1116* shrinker functions
1117**********************************/
1118/*
1119 * The dynamic shrinker is modulated by the following factors:
1120 *
1121 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
1122 * the entry a second chance) before rotating it in the LRU list. If the
1123 * entry is considered again by the shrinker, with its referenced bit unset,
1124 * it is written back. The writeback rate as a result is dynamically
1125 * adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
1127 * the writeback rate will slow down. On the other hand, if the pool has a
1128 * lot of stagnant entries, these entries will be reclaimed immediately,
1129 * effectively increasing the writeback rate.
1130 *
1131 * 2. Swapins counter: If we observe swapins, it is a sign that we are
1132 * overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the LRU
1134 * in zswap_shrinker_count().
1135 *
1136 * 3. Compression ratio. The better the workload compresses, the less gains we
1137 * can expect from writeback. We scale down the number of objects available
1138 * for reclaim by this ratio.
1139 */
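
/*
 * For example (illustrative walkthrough): a freshly stored entry has
 * its referenced bit set, so the first shrinker pass merely clears the
 * bit and rotates the entry; only a later pass that finds the bit still
 * clear writes it back. A pool full of fresh zswapouts therefore
 * throttles its own writeback.
 */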
1140static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1141 void *arg)
1142{
1143 struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1144 bool *encountered_page_in_swapcache = (bool *)arg;
1145 swp_entry_t swpentry;
1146 enum lru_status ret = LRU_REMOVED_RETRY;
1147 int writeback_result;
1148
1149 /*
1150 * Second chance algorithm: if the entry has its referenced bit set, give it
1151 * a second chance. Only clear the referenced bit and rotate it in the
1152 * zswap's LRU list.
1153 */
1154 if (entry->referenced) {
1155 entry->referenced = false;
1156 return LRU_ROTATE;
1157 }
1158
1159 /*
1160 * As soon as we drop the LRU lock, the entry can be freed by
1161 * a concurrent invalidation. This means the following:
1162 *
1163 * 1. We extract the swp_entry_t to the stack, allowing
1164 * zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
1166 * tree using pointer value comparison. Only when that
1167 * is successful can the entry be dereferenced.
1168 *
1169 * 2. Usually, objects are taken off the LRU for reclaim. In
1170 * this case this isn't possible, because if reclaim fails
1171 * for whatever reason, we have no means of knowing if the
1172 * entry is alive to put it back on the LRU.
1173 *
1174 * So rotate it before dropping the lock. If the entry is
1175 * written back or invalidated, the free path will unlink
1176 * it. For failures, rotation is the right thing as well.
1177 *
1178 * Temporary failures, where the same entry should be tried
1179 * again immediately, almost never happen for this shrinker.
1180 * We don't do any trylocking; -ENOMEM comes closest,
1181 * but that's extremely rare and doesn't happen spuriously
1182 * either. Don't bother distinguishing this case.
1183 */
1184 list_move_tail(item, &l->list);
1185
1186 /*
1187 * Once the lru lock is dropped, the entry might get freed. The
1188 * swpentry is copied to the stack, and entry isn't deref'd again
1189 * until the entry is verified to still be alive in the tree.
1190 */
1191 swpentry = entry->swpentry;
1192
1193 /*
1194 * It's safe to drop the lock here because we return either
1195 * LRU_REMOVED_RETRY or LRU_RETRY.
1196 */
1197 spin_unlock(&l->lock);
1198
1199 writeback_result = zswap_writeback_entry(entry, swpentry);
1200
1201 if (writeback_result) {
1202 zswap_reject_reclaim_fail++;
1203 ret = LRU_RETRY;
1204
1205 /*
1206 * Encountering a page already in swap cache is a sign that we are shrinking
1207 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1208 * shrinker context).
1209 */
1210 if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1211 ret = LRU_STOP;
1212 *encountered_page_in_swapcache = true;
1213 }
1214 } else {
1215 zswap_written_back_pages++;
1216 }
1217
1218 return ret;
1219}
1220
1221static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1222 struct shrink_control *sc)
1223{
1224 unsigned long shrink_ret;
1225 bool encountered_page_in_swapcache = false;
1226
1227 if (!zswap_shrinker_enabled ||
1228 !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1229 sc->nr_scanned = 0;
1230 return SHRINK_STOP;
1231 }
1232
1233 shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1234 &encountered_page_in_swapcache);
1235
1236 if (encountered_page_in_swapcache)
1237 return SHRINK_STOP;
1238
1239 return shrink_ret ? shrink_ret : SHRINK_STOP;
1240}
1241
1242static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1243 struct shrink_control *sc)
1244{
1245 struct mem_cgroup *memcg = sc->memcg;
1246 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1247 atomic_long_t *nr_disk_swapins =
1248 &lruvec->zswap_lruvec_state.nr_disk_swapins;
1249 unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
1250 nr_remain;
1251
1252 if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1253 return 0;
1254
1255 /*
1256 * The shrinker resumes swap writeback, which will enter block
1257 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1258 * rules (may_enter_fs()), which apply on a per-folio basis.
1259 */
1260 if (!gfp_has_io_fs(sc->gfp_mask))
1261 return 0;
1262
1263 /*
1264 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1265 * have them per-node and thus per-lruvec. Careful if memcg is
1266 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1267 * for the lruvec, but not for memcg_page_state().
1268 *
1269 * Without memcg, use the zswap pool-wide metrics.
1270 */
1271 if (!mem_cgroup_disabled()) {
1272 mem_cgroup_flush_stats(memcg);
1273 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1274 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1275 } else {
1276 nr_backing = zswap_total_pages();
1277 nr_stored = atomic_long_read(&zswap_stored_pages);
1278 }
1279
1280 if (!nr_stored)
1281 return 0;
1282
1283 nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1284 if (!nr_freeable)
1285 return 0;
1286
1287 /*
	 * Subtract from the lru size the number of pages that were recently
	 * swapped in from disk. The idea is that had we protected the zswap
	 * LRU by this amount of pages, these disk swapins would not have
	 * happened.
1291 */
1292 nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
1293 do {
1294 if (nr_freeable >= nr_disk_swapins_cur)
1295 nr_remain = 0;
1296 else
1297 nr_remain = nr_disk_swapins_cur - nr_freeable;
1298 } while (!atomic_long_try_cmpxchg(
1299 nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
1300
1301 nr_freeable -= nr_disk_swapins_cur - nr_remain;
1302 if (!nr_freeable)
1303 return 0;
1304
1305 /*
1306 * Scale the number of freeable pages by the memory saving factor.
1307 * This ensures that the better zswap compresses memory, the fewer
1308 * pages we will evict to swap (as it will otherwise incur IO for
1309 * relatively small memory saving).
1310 */
1311 return mult_frac(nr_freeable, nr_backing, nr_stored);
1312}
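
/*
 * Worked example of the scaling above (illustrative numbers): with
 * nr_freeable = 1000 LRU objects, nr_backing = 250 pages of pool memory
 * and nr_stored = 1000 pages (a 4:1 compression ratio),
 * mult_frac(1000, 250, 1000) reports only 250 objects to the shrinker,
 * since writing back well-compressed pages frees little memory per unit
 * of IO.
 */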
1313
1314static struct shrinker *zswap_alloc_shrinker(void)
1315{
1316 struct shrinker *shrinker;
1317
1318 shrinker =
1319 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1320 if (!shrinker)
1321 return NULL;
1322
1323 shrinker->scan_objects = zswap_shrinker_scan;
1324 shrinker->count_objects = zswap_shrinker_count;
1325 shrinker->batch = 0;
1326 shrinker->seeks = DEFAULT_SEEKS;
1327 return shrinker;
1328}
1329
1330static int shrink_memcg(struct mem_cgroup *memcg)
1331{
1332 int nid, shrunk = 0, scanned = 0;
1333
1334 if (!mem_cgroup_zswap_writeback_enabled(memcg))
1335 return -ENOENT;
1336
1337 /*
1338 * Skip zombies because their LRUs are reparented and we would be
1339 * reclaiming from the parent instead of the dead memcg.
1340 */
1341 if (memcg && !mem_cgroup_online(memcg))
1342 return -ENOENT;
1343
1344 for_each_node_state(nid, N_NORMAL_MEMORY) {
1345 unsigned long nr_to_walk = 1;
1346
1347 shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1348 &shrink_memcg_cb, NULL, &nr_to_walk);
1349 scanned += 1 - nr_to_walk;
1350 }
1351
1352 if (!scanned)
1353 return -ENOENT;
1354
1355 return shrunk ? 0 : -EAGAIN;
1356}
1357
1358static void shrink_worker(struct work_struct *w)
1359{
1360 struct mem_cgroup *memcg;
1361 int ret, failures = 0, attempts = 0;
1362 unsigned long thr;
1363
1364 /* Reclaim down to the accept threshold */
1365 thr = zswap_accept_thr_pages();
1366
1367 /*
 * Global reclaim will select a cgroup in a round-robin fashion from all
1369 * online memcgs, but memcgs that have no pages in zswap and
1370 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
1371 * candidates for shrinking.
1372 *
1373 * Shrinking will be aborted if we encounter the following
1374 * MAX_RECLAIM_RETRIES times:
1375 * - No writeback-candidate memcgs found in a memcg tree walk.
1376 * - Shrinking a writeback-candidate memcg failed.
1377 *
1378 * We save iteration cursor memcg into zswap_next_shrink,
1379 * which can be modified by the offline memcg cleaner
1380 * zswap_memcg_offline_cleanup().
1381 *
1382 * Since the offline cleaner is called only once, we cannot leave an
1383 * offline memcg reference in zswap_next_shrink.
1384 * We can rely on the cleaner only if we get online memcg under lock.
1385 *
1386 * If we get an offline memcg, we cannot determine if the cleaner has
1387 * already been called or will be called later. We must put back the
1388 * reference before returning from this function. Otherwise, the
1389 * offline memcg left in zswap_next_shrink will hold the reference
1390 * until the next run of shrink_worker().
1391 */
1392 do {
1393 /*
1394 * Start shrinking from the next memcg after zswap_next_shrink.
1395 * When the offline cleaner has already advanced the cursor,
1396 * advancing the cursor here overlooks one memcg, but this
1397 * should be negligibly rare.
1398 *
1399 * If we get an online memcg, keep the extra reference in case
1400 * the original one obtained by mem_cgroup_iter() is dropped by
1401 * zswap_memcg_offline_cleanup() while we are shrinking the
1402 * memcg.
1403 */
1404 spin_lock(&zswap_shrink_lock);
1405 do {
1406 memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1407 zswap_next_shrink = memcg;
1408 } while (memcg && !mem_cgroup_tryget_online(memcg));
1409 spin_unlock(&zswap_shrink_lock);
1410
1411 if (!memcg) {
1412 /*
1413 * Continue shrinking without incrementing failures if
1414 * we found candidate memcgs in the last tree walk.
1415 */
1416 if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
1417 break;
1418
1419 attempts = 0;
1420 goto resched;
1421 }
1422
1423 ret = shrink_memcg(memcg);
1424 /* drop the extra reference */
1425 mem_cgroup_put(memcg);
1426
1427 /*
1428 * There are no writeback-candidate pages in the memcg.
1429 * This is not an issue as long as we can find another memcg
1430 * with pages in zswap. Skip this without incrementing attempts
1431 * and failures.
1432 */
1433 if (ret == -ENOENT)
1434 continue;
1435 ++attempts;
1436
1437 if (ret && ++failures == MAX_RECLAIM_RETRIES)
1438 break;
1439resched:
1440 cond_resched();
1441 } while (zswap_total_pages() > thr);
1442}
1443
1444/*********************************
1445* main API
1446**********************************/
1447
1448static bool zswap_store_page(struct page *page,
1449 struct obj_cgroup *objcg,
1450 struct zswap_pool *pool)
1451{
1452 swp_entry_t page_swpentry = page_swap_entry(page);
1453 struct zswap_entry *entry, *old;
1454
1455 /* allocate entry */
1456 entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
1457 if (!entry) {
1458 zswap_reject_kmemcache_fail++;
1459 return false;
1460 }
1461
1462 if (!zswap_compress(page, entry, pool))
1463 goto compress_failed;
1464
1465 old = xa_store(swap_zswap_tree(page_swpentry),
1466 swp_offset(page_swpentry),
1467 entry, GFP_KERNEL);
1468 if (xa_is_err(old)) {
1469 int err = xa_err(old);
1470
1471 WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
1472 zswap_reject_alloc_fail++;
1473 goto store_failed;
1474 }
1475
1476 /*
1477 * We may have had an existing entry that became stale when
1478 * the folio was redirtied and now the new version is being
1479 * swapped out. Get rid of the old.
1480 */
1481 if (old)
1482 zswap_entry_free(old);
1483
1484 /*
1485 * The entry is successfully compressed and stored in the tree, there is
1486 * no further possibility of failure. Grab refs to the pool and objcg,
1487 * charge zswap memory, and increment zswap_stored_pages.
1488 * The opposite actions will be performed by zswap_entry_free()
1489 * when the entry is removed from the tree.
1490 */
1491 zswap_pool_get(pool);
1492 if (objcg) {
1493 obj_cgroup_get(objcg);
1494 obj_cgroup_charge_zswap(objcg, entry->length);
1495 }
1496 atomic_long_inc(&zswap_stored_pages);
1497
1498 /*
1499 * We finish initializing the entry while it's already in xarray.
1500 * This is safe because:
1501 *
1502 * 1. Concurrent stores and invalidations are excluded by folio lock.
1503 *
1504 * 2. Writeback is excluded by the entry not being on the LRU yet.
1505 * The publishing order matters to prevent writeback from seeing
1506 * an incoherent entry.
1507 */
1508 entry->pool = pool;
1509 entry->swpentry = page_swpentry;
1510 entry->objcg = objcg;
1511 entry->referenced = true;
1512 if (entry->length) {
1513 INIT_LIST_HEAD(&entry->lru);
1514 zswap_lru_add(&zswap_list_lru, entry);
1515 }
1516
1517 return true;
1518
1519store_failed:
1520 zpool_free(pool->zpool, entry->handle);
1521compress_failed:
1522 zswap_entry_cache_free(entry);
1523 return false;
1524}
1525
1526bool zswap_store(struct folio *folio)
1527{
1528 long nr_pages = folio_nr_pages(folio);
1529 swp_entry_t swp = folio->swap;
1530 struct obj_cgroup *objcg = NULL;
1531 struct mem_cgroup *memcg = NULL;
1532 struct zswap_pool *pool;
1533 bool ret = false;
1534 long index;
1535
1536 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1537 VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1538
1539 if (!zswap_enabled)
1540 goto check_old;
1541
1542 objcg = get_obj_cgroup_from_folio(folio);
1543 if (objcg && !obj_cgroup_may_zswap(objcg)) {
1544 memcg = get_mem_cgroup_from_objcg(objcg);
1545 if (shrink_memcg(memcg)) {
1546 mem_cgroup_put(memcg);
1547 goto put_objcg;
1548 }
1549 mem_cgroup_put(memcg);
1550 }
1551
1552 if (zswap_check_limits())
1553 goto put_objcg;
1554
1555 pool = zswap_pool_current_get();
1556 if (!pool)
1557 goto put_objcg;
1558
1559 if (objcg) {
1560 memcg = get_mem_cgroup_from_objcg(objcg);
1561 if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1562 mem_cgroup_put(memcg);
1563 goto put_pool;
1564 }
1565 mem_cgroup_put(memcg);
1566 }
1567
1568 for (index = 0; index < nr_pages; ++index) {
1569 struct page *page = folio_page(folio, index);
1570
1571 if (!zswap_store_page(page, objcg, pool))
1572 goto put_pool;
1573 }
1574
1575 if (objcg)
1576 count_objcg_events(objcg, ZSWPOUT, nr_pages);
1577
1578 count_vm_events(ZSWPOUT, nr_pages);
1579
1580 ret = true;
1581
1582put_pool:
1583 zswap_pool_put(pool);
1584put_objcg:
1585 obj_cgroup_put(objcg);
1586 if (!ret && zswap_pool_reached_full)
1587 queue_work(shrink_wq, &zswap_shrink_work);
1588check_old:
1589 /*
1590 * If the zswap store fails or zswap is disabled, we must invalidate
1591 * the possibly stale entries which were previously stored at the
1592 * offsets corresponding to each page of the folio. Otherwise,
1593 * writeback could overwrite the new data in the swapfile.
1594 */
1595 if (!ret) {
1596 unsigned type = swp_type(swp);
1597 pgoff_t offset = swp_offset(swp);
1598 struct zswap_entry *entry;
1599 struct xarray *tree;
1600
1601 for (index = 0; index < nr_pages; ++index) {
1602 tree = swap_zswap_tree(swp_entry(type, offset + index));
1603 entry = xa_erase(tree, offset + index);
1604 if (entry)
1605 zswap_entry_free(entry);
1606 }
1607 }
1608
1609 return ret;
1610}
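
/*
 * Descriptive note (not from the original source): zswap_store() is
 * all-or-nothing for a large folio. If any subpage fails to compress
 * and store, the folio is rejected as a whole, and the check_old path
 * above erases any entries already written for its offsets so that the
 * swapfile copy remains authoritative.
 */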
1611
1612bool zswap_load(struct folio *folio)
1613{
1614 swp_entry_t swp = folio->swap;
1615 pgoff_t offset = swp_offset(swp);
1616 bool swapcache = folio_test_swapcache(folio);
1617 struct xarray *tree = swap_zswap_tree(swp);
1618 struct zswap_entry *entry;
1619
1620 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1621
1622 if (zswap_never_enabled())
1623 return false;
1624
1625 /*
1626 * Large folios should not be swapped in while zswap is being used, as
1627 * they are not properly handled. Zswap does not properly load large
1628 * folios, and a large folio may only be partially in zswap.
1629 *
1630 * Return true without marking the folio uptodate so that an IO error is
1631 * emitted (e.g. do_swap_page() will sigbus).
1632 */
1633 if (WARN_ON_ONCE(folio_test_large(folio)))
1634 return true;
1635
1636 /*
1637 * When reading into the swapcache, invalidate our entry. The
1638 * swapcache can be the authoritative owner of the page and
1639 * its mappings, and the pressure that results from having two
1640 * in-memory copies outweighs any benefits of caching the
1641 * compression work.
1642 *
1643 * (Most swapins go through the swapcache. The notable
1644 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1645 * files, which reads into a private page and may free it if
1646 * the fault fails. We remain the primary owner of the entry.)
1647 */
1648 if (swapcache)
1649 entry = xa_erase(tree, offset);
1650 else
1651 entry = xa_load(tree, offset);
1652
1653 if (!entry)
1654 return false;
1655
1656 zswap_decompress(entry, folio);
1657
1658 count_vm_event(ZSWPIN);
1659 if (entry->objcg)
1660 count_objcg_events(entry->objcg, ZSWPIN, 1);
1661
1662 if (swapcache) {
1663 zswap_entry_free(entry);
1664 folio_mark_dirty(folio);
1665 }
1666
1667 folio_mark_uptodate(folio);
1668 return true;
1669}
1670
1671void zswap_invalidate(swp_entry_t swp)
1672{
1673 pgoff_t offset = swp_offset(swp);
1674 struct xarray *tree = swap_zswap_tree(swp);
1675 struct zswap_entry *entry;
1676
1677 if (xa_empty(tree))
1678 return;
1679
1680 entry = xa_erase(tree, offset);
1681 if (entry)
1682 zswap_entry_free(entry);
1683}
1684
1685int zswap_swapon(int type, unsigned long nr_pages)
1686{
1687 struct xarray *trees, *tree;
1688 unsigned int nr, i;
1689
1690 nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1691 trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1692 if (!trees) {
1693 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1694 return -ENOMEM;
1695 }
1696
1697 for (i = 0; i < nr; i++)
1698 xa_init(trees + i);
1699
1700 nr_zswap_trees[type] = nr;
1701 zswap_trees[type] = trees;
1702 return 0;
1703}
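
/*
 * Illustration (not from the original source): assuming
 * SWAP_ADDRESS_SPACE_PAGES = 1 << 14 and 4 KiB pages, a 1 GiB swap area
 * has 262144 slots and gets DIV_ROUND_UP(262144, 16384) = 16 xarrays,
 * matching the sharding done by swap_zswap_tree().
 */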
1704
1705void zswap_swapoff(int type)
1706{
1707 struct xarray *trees = zswap_trees[type];
1708 unsigned int i;
1709
1710 if (!trees)
1711 return;
1712
1713 /* try_to_unuse() invalidated all the entries already */
1714 for (i = 0; i < nr_zswap_trees[type]; i++)
1715 WARN_ON_ONCE(!xa_empty(trees + i));
1716
1717 kvfree(trees);
1718 nr_zswap_trees[type] = 0;
1719 zswap_trees[type] = NULL;
1720}
1721
1722/*********************************
1723* debugfs functions
1724**********************************/
1725#ifdef CONFIG_DEBUG_FS
1726#include <linux/debugfs.h>
1727
1728static struct dentry *zswap_debugfs_root;
1729
1730static int debugfs_get_total_size(void *data, u64 *val)
1731{
1732 *val = zswap_total_pages() * PAGE_SIZE;
1733 return 0;
1734}
1735DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
1736
1737static int debugfs_get_stored_pages(void *data, u64 *val)
1738{
1739 *val = atomic_long_read(&zswap_stored_pages);
1740 return 0;
1741}
1742DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");
1743
1744static int zswap_debugfs_init(void)
1745{
1746 if (!debugfs_initialized())
1747 return -ENODEV;
1748
1749 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1750
1751 debugfs_create_u64("pool_limit_hit", 0444,
1752 zswap_debugfs_root, &zswap_pool_limit_hit);
1753 debugfs_create_u64("reject_reclaim_fail", 0444,
1754 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1755 debugfs_create_u64("reject_alloc_fail", 0444,
1756 zswap_debugfs_root, &zswap_reject_alloc_fail);
1757 debugfs_create_u64("reject_kmemcache_fail", 0444,
1758 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1759 debugfs_create_u64("reject_compress_fail", 0444,
1760 zswap_debugfs_root, &zswap_reject_compress_fail);
1761 debugfs_create_u64("reject_compress_poor", 0444,
1762 zswap_debugfs_root, &zswap_reject_compress_poor);
1763 debugfs_create_u64("written_back_pages", 0444,
1764 zswap_debugfs_root, &zswap_written_back_pages);
1765 debugfs_create_file("pool_total_size", 0444,
1766 zswap_debugfs_root, NULL, &total_size_fops);
1767 debugfs_create_file("stored_pages", 0444,
1768 zswap_debugfs_root, NULL, &stored_pages_fops);
1769
1770 return 0;
1771}
1772#else
1773static int zswap_debugfs_init(void)
1774{
1775 return 0;
1776}
1777#endif
1778
1779/*********************************
1780* module init and exit
1781**********************************/
1782static int zswap_setup(void)
1783{
1784 struct zswap_pool *pool;
1785 int ret;
1786
1787 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1788 if (!zswap_entry_cache) {
1789 pr_err("entry cache creation failed\n");
1790 goto cache_fail;
1791 }
1792
1793 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1794 "mm/zswap_pool:prepare",
1795 zswap_cpu_comp_prepare,
1796 zswap_cpu_comp_dead);
1797 if (ret)
1798 goto hp_fail;
1799
1800 shrink_wq = alloc_workqueue("zswap-shrink",
1801 WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1802 if (!shrink_wq)
1803 goto shrink_wq_fail;
1804
1805 zswap_shrinker = zswap_alloc_shrinker();
1806 if (!zswap_shrinker)
1807 goto shrinker_fail;
1808 if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1809 goto lru_fail;
1810 shrinker_register(zswap_shrinker);
1811
1812 INIT_WORK(&zswap_shrink_work, shrink_worker);
1813
1814 pool = __zswap_pool_create_fallback();
1815 if (pool) {
1816 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1817 zpool_get_type(pool->zpool));
1818 list_add(&pool->list, &zswap_pools);
1819 zswap_has_pool = true;
1820 static_branch_enable(&zswap_ever_enabled);
1821 } else {
1822 pr_err("pool creation failed\n");
1823 zswap_enabled = false;
1824 }
1825
1826 if (zswap_debugfs_init())
1827 pr_warn("debugfs initialization failed\n");
1828 zswap_init_state = ZSWAP_INIT_SUCCEED;
1829 return 0;
1830
1831lru_fail:
1832 shrinker_free(zswap_shrinker);
1833shrinker_fail:
1834 destroy_workqueue(shrink_wq);
1835shrink_wq_fail:
1836 cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1837hp_fail:
1838 kmem_cache_destroy(zswap_entry_cache);
1839cache_fail:
1840 /* if built-in, we aren't unloaded on failure; don't allow use */
1841 zswap_init_state = ZSWAP_INIT_FAILED;
1842 zswap_enabled = false;
1843 return -ENOMEM;
1844}
1845
1846static int __init zswap_init(void)
1847{
1848 if (!zswap_enabled)
1849 return 0;
1850 return zswap_setup();
1851}
1852/* must be late so crypto has time to come up */
1853late_initcall(zswap_init);
1854
1855MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1856MODULE_DESCRIPTION("Compressed cache for swap pages");