/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
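
/*
 * Runtime usage sketch (assuming the standard sysfs layout that
 * module_param*() provides; the parameters themselves are defined below):
 *
 *	echo 1    > /sys/module/zswap/parameters/enabled
 *	echo lzo  > /sys/module/zswap/parameters/compressor
 *	echo zbud > /sys/module/zswap/parameters/zpool
 *	echo 20   > /sys/module/zswap/parameters/max_pool_percent
 *
 * The same knobs can be given on the kernel command line, e.g.
 * zswap.enabled=1.
 */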

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
module_param_named(enabled, zswap_enabled, bool, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/*********************************
* data structures
**********************************/

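/*
 * A zswap_pool ties one zpool instance to one per-cpu set of compression
 * transforms.  Several pools can coexist: when the compressor or zpool
 * parameter is changed at runtime, old pools linger on zswap_pools until
 * their stored entries drain.
 */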
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct rcu_head rcu_head;
	struct notifier_block notifier;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry.  This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

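/*
 * Returns true once the compressed pool exceeds its allowed share of RAM.
 * For example, with 4 GiB of RAM and the default max_pool_percent of 20,
 * the pool counts as full once it holds more than about 819 MiB
 * (rounded up to whole pages).
 */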
static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
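/*
 * Look up the entry at @offset in the per-type red-black tree, which is
 * keyed by swap offset.  Returns NULL if no entry is present; the caller
 * must hold the tree lock.
 */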
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
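/*
 * Per-cpu scratch buffer that compression output is written to before it
 * is copied into the zpool.  It is two pages because a compressor may
 * expand incompressible input beyond PAGE_SIZE; such poorly-compressed
 * results are rejected at store time anyway.
 */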
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
{
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
				     unsigned long action, void *pcpu)
{
	return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
}

static struct notifier_block zswap_dstmem_notifier = {
	.notifier_call = zswap_cpu_dstmem_notifier,
};

static int __init zswap_cpu_dstmem_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_dstmem_destroy(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&zswap_dstmem_notifier);
	cpu_notifier_register_done();
}

static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
				     unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;

	switch (action) {
	case CPU_UP_PREPARE:
		if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
			break;
		tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
		if (IS_ERR_OR_NULL(tfm)) {
			pr_err("could not alloc crypto comp %s : %ld\n",
			       pool->tfm_name, PTR_ERR(tfm));
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(pool->tfm, cpu) = tfm;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(pool->tfm, cpu);
		if (!IS_ERR_OR_NULL(tfm))
			crypto_free_comp(tfm);
		*per_cpu_ptr(pool->tfm, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_comp_notifier(struct notifier_block *nb,
				   unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);

	return __zswap_cpu_comp_notifier(pool, action, cpu);
}

static int zswap_cpu_comp_init(struct zswap_pool *pool)
{
	unsigned long cpu;

	memset(&pool->notifier, 0, sizeof(pool->notifier));
	pool->notifier.notifier_call = zswap_cpu_comp_notifier;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
		    NOTIFY_BAD)
			goto cleanup;
	__register_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&pool->notifier);
	cpu_notifier_register_done();
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ON(!pool);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!pool || !zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

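/*
 * New pools are added at the head of zswap_pools, so the list tail is
 * the oldest pool; zswap_shrink() evicts from that one first.
 */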
static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	if (!WARN_ON(!last) && !zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("pool alloc failed\n");
		return NULL;
	}

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	if (zswap_cpu_comp_init(pool))
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
			pr_err("default compressor %s not available\n",
			       zswap_compressor);
			return NULL;
		}
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
	}
	if (!zpool_has_pool(zswap_zpool_type)) {
		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
			pr_err("default zpool %s not available\n",
			       zswap_zpool_type);
			return NULL;
		}
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
	}

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	zswap_cpu_comp_destroy(pool);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct rcu_head *head)
{
	struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head);

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);
	call_rcu(&pool->rcu_head, __zswap_pool_release);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

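/*
 * Changing the compressor or zpool at runtime swaps the current pool:
 * the new (or matching pre-existing) pool is placed at the head of
 * zswap_pools and the old current pool is put; it is destroyed once all
 * of its outstanding references are dropped.
 */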
/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg))
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		list_del_rcu(&pool->list);
	} else {
		spin_unlock(&zswap_pools_lock);
		pool = zswap_pool_create(type, compressor);
		spin_lock(&zswap_pools_lock);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted by frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 * search the tree and free the entry if found
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently, so it is safe and okay to not free the
	 * entry.  It is also okay to return !0 even if we freed the entry
	 * in the following put.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

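/*
 * Ask the oldest pool's allocator to evict one stored object; the zpool
 * calls back into zswap_writeback_entry() above to decompress the page
 * and resume its writeback to the swap device.
 */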
static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
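	/*
	 * A stale entry can still sit at this offset: loads do not remove
	 * the entry from the tree, so a page can be stored again to an
	 * offset it already occupies.  Replace the old entry and retry.
	 */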
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * returns 0 if the page was successfully decompressed
 * returns -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
				zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	if (zswap_cpu_dstmem_init()) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	pool = __zswap_pool_create_fallback();
	if (!pool) {
		pr_err("pool creation failed\n");
		goto pool_fail;
	}
	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
		zpool_get_type(pool->zpool));

	list_add(&pool->list, &zswap_pools);

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

pool_fail:
	zswap_cpu_dstmem_destroy();
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

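/* marks a compressor or zpool param whose initial pool creation failed;
 * see __zswap_pool_create_fallback() below
 */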
#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* Enable/disable handling same-value filled pages (enabled by default) */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry.  This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.  For a same-value filled page, length is 0.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
};

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(entry->pool->zpool, entry->handle);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/* caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

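/*
 * CPU hotplug callbacks.  These manage the per-cpu scratch buffer and the
 * per-cpu compression transforms as CPUs come and go; they are assumed to
 * be registered through the cpuhp_setup_state*() machinery during init,
 * which lies outside this excerpt.
 */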
static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst)
		return -ENOMEM;

	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	u8 *dst;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
		return 0;

	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
	if (IS_ERR_OR_NULL(tfm)) {
		pr_err("could not alloc crypto comp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(tfm));
		return -ENOMEM;
	}
	*per_cpu_ptr(pool->tfm, cpu) = tfm;
	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(pool->tfm, cpu);
	if (!IS_ERR_OR_NULL(tfm))
		crypto_free_comp(tfm);
	*per_cpu_ptr(pool->tfm, cpu) = NULL;
	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_comp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool), work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->work, __zswap_pool_release);
	schedule_work(&pool->work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

674/* val must be a null-terminated string */
675static int __zswap_param_set(const char *val, const struct kernel_param *kp,
676 char *type, char *compressor)
677{
678 struct zswap_pool *pool, *put_pool = NULL;
679 char *s = strstrip((char *)val);
680 int ret;
681
682 if (zswap_init_failed) {
683 pr_err("can't set param, initialization failed\n");
684 return -ENODEV;
685 }
686
687 /* no change required */
688 if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
689 return 0;
690
691 /* if this is load-time (pre-init) param setting,
692 * don't create a pool; that's done during init.
693 */
694 if (!zswap_init_started)
695 return param_set_charp(s, kp);
696
697 if (!type) {
698 if (!zpool_has_pool(s)) {
699 pr_err("zpool %s not available\n", s);
700 return -ENOENT;
701 }
702 type = s;
703 } else if (!compressor) {
704 if (!crypto_has_comp(s, 0, 0)) {
705 pr_err("compressor %s not available\n", s);
706 return -ENOENT;
707 }
708 compressor = s;
709 } else {
710 WARN_ON(1);
711 return -EINVAL;
712 }
713
714 spin_lock(&zswap_pools_lock);
715
716 pool = zswap_pool_find_get(type, compressor);
717 if (pool) {
718 zswap_pool_debug("using existing", pool);
719 WARN_ON(pool == zswap_pool_current());
720 list_del_rcu(&pool->list);
721 }
722
723 spin_unlock(&zswap_pools_lock);
724
725 if (!pool)
726 pool = zswap_pool_create(type, compressor);
727
728 if (pool)
729 ret = param_set_charp(s, kp);
730 else
731 ret = -EINVAL;
732
733 spin_lock(&zswap_pools_lock);
734
735 if (!ret) {
736 put_pool = zswap_pool_current();
737 list_add_rcu(&pool->list, &zswap_pools);
738 zswap_has_pool = true;
739 } else if (pool) {
740 /* add the possibly pre-existing pool to the end of the pools
741 * list; if it's new (and empty) then it'll be removed and
742 * destroyed by the put after we drop the lock
743 */
744 list_add_tail_rcu(&pool->list, &zswap_pools);
745 put_pool = pool;
746 }
747
748 spin_unlock(&zswap_pools_lock);
749
750 if (!zswap_has_pool && !pool) {
751 /* if initial pool creation failed, and this pool creation also
752 * failed, maybe both compressor and zpool params were bad.
753 * Allow changing this param, so pool creation will succeed
754 * when the other param is changed. We already verified this
755 * param is ok in the zpool_has_pool() or crypto_has_comp()
756 * checks above.
757 */
758 ret = param_set_charp(s, kp);
759 }
760
761 /* drop the ref from either the old current pool,
762 * or the new pool we failed to add
763 */
764 if (put_pool)
765 zswap_pool_put(put_pool);
766
767 return ret;
768}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	if (zswap_init_failed) {
		pr_err("can't enable, initialization failed\n");
		return -ENODEV;
	}
	if (!zswap_has_pool && zswap_init_started) {
		pr_err("can't enable, no pool configured\n");
		return -ENODEV;
	}

	return param_set_bool(val, kp);
}
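
/*
 * Illustrative usage (not part of the upstream file): the three callbacks
 * above back module parameters that can be changed at runtime through
 * sysfs, e.g. from a root shell:
 *
 *	echo lz4 > /sys/module/zswap/parameters/compressor
 *	echo 1 > /sys/module/zswap/parameters/enabled
 *
 * Switching the compressor or zpool creates (or reuses) a matching pool
 * and makes it current; enabling is refused with -ENODEV if initialization
 * failed or no pool could be configured. "lz4" here is only an example and
 * must be available as a crypto compressor in the running kernel.
 */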

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};
807/*
808 * zswap_get_swap_cache_page
809 *
810 * This is an adaption of read_swap_cache_async()
811 *
812 * This function tries to find a page with the given swap entry
813 * in the swapper_space address space (the swap cache). If the page
814 * is found, it is returned in retpage. Otherwise, a page is allocated,
815 * added to the swap cache, and returned in retpage.
816 *
817 * If success, the swap cache page is returned in retpage
818 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
819 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
820 * the new page is added to swapcache and locked
821 * Returns ZSWAP_SWAPCACHE_FAIL on error
822 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				     struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
					   NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* copy swpentry out before unmapping */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case): entry is valid and on the tree
	 * (2) refcount is 0: entry is freed and not on the tree
	 *     because an invalidate happened during writeback
	 * Search the tree and drop the tree reference if the entry is found.
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently. It is safe and okay to not free the
	 * entry. If we free the entry in the following put, it is also
	 * okay to return !0.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;
	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];
	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
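
/*
 * Illustrative sketch (not compiled): the two helpers above form a round
 * trip for same-filled pages. A page whose unsigned-long words are all
 * identical is stored as that single word (entry->length == 0) and
 * reconstructed on load:
 *
 *	unsigned long value;
 *	u8 *kaddr = kmap_atomic(page);
 *
 *	if (zswap_is_page_same_filled(kaddr, &value)) {
 *		// store only 'value' in the entry, no zpool allocation
 *		// ... later, on load:
 *		zswap_fill_page(kaddr, value);	// memset_l() restores the page
 *	}
 *	kunmap_atomic(kaddr);
 *
 * The zero page (value == 0) is the common case, but any repeating word,
 * e.g. 0x5a5a5a5a5a5a5a5a on 64-bit, also qualifies.
 */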

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				 struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int hlen, dlen = PAGE_SIZE;
	unsigned long handle, value;
	char *buf;
	u8 *src, *dst;
	struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };

	/* THP isn't supported */
	if (PageTransHuge(page)) {
		ret = -EINVAL;
		goto reject;
	}

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	if (zswap_same_filled_pages_enabled) {
		src = kmap_atomic(page);
		if (zswap_is_page_same_filled(src, &value)) {
			kunmap_atomic(src);
			entry->offset = offset;
			entry->length = 0;
			entry->value = value;
			atomic_inc(&zswap_same_filled_pages);
			goto insert_entry;
		}
		kunmap_atomic(src);
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
	ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	memcpy(buf, &zhdr, hlen);
	memcpy(buf + hlen, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

insert_entry:
	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * Returns 0 if the page was successfully decompressed.
 * Returns -1 if the entry was not found or on error.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (!entry->length) {
		dst = kmap_atomic(page);
		zswap_fill_page(dst, entry->value);
		kunmap_atomic(dst);
		goto freeentry;
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
	if (zpool_evictable(entry->pool->zpool))
		src += sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

freeentry:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", 0444,
			   zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", 0444,
			   zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
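
/*
 * Illustrative usage (not part of the upstream file): with CONFIG_DEBUG_FS
 * enabled and debugfs mounted at the conventional location, the counters
 * registered above appear as read-only files:
 *
 *	grep -H . /sys/kernel/debug/zswap/*
 *
 * The files mirror this file's statistics variables and are useful for
 * judging compression behavior and reclaim pressure at runtime.
 */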

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_failed = true;
	zswap_enabled = false;
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");