/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to zspage
 *	page->freelist(index): links together all component pages of a zspage
 *		For the huge page, this is always 0, so we use this field
 *		to store handle.
 *	page->units: first object offset in a subpage of zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_owner_priv_1: identifies the huge component page
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/mount.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/fs.h>

#define ZSPAGE_MAGIC	0x58

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#ifdef MAX_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS		(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The memory allocated for a handle keeps the object position by
 * encoding <page, obj_idx>, and the encoded value has room in its
 * least significant bit (ie, look at obj_to_location).
 * We use that bit to synchronize object access between the user
 * and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG to
 * identify whether the object was allocated or not.
 * It's okay to add the status bit in the least significant bit because
 * the header keeps a handle, which is a 4byte-aligned address, so we
 * have room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
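
/*
 * Illustration (not part of the original source): on a hypothetical
 * 64-bit machine with PAGE_SHIFT == 12 and MAX_POSSIBLE_PHYSMEM_BITS
 * == 46, _PFN_BITS is 34 and OBJ_INDEX_BITS is 64 - 34 - 1 = 29, so an
 * object value is laid out as:
 *
 *	bit 63 ......... 30 | 29 ......... 1 | 0
 *	       <PFN>             <obj_idx>     <tag>
 *
 * and the (de)coding mirrors location_to_obj()/obj_to_location() below:
 *
 *	obj = (pfn << OBJ_INDEX_BITS | obj_idx) << OBJ_TAG_BITS;
 *	pfn = (obj >> OBJ_TAG_BITS) >> OBJ_INDEX_BITS;
 *	obj_idx = (obj >> OBJ_TAG_BITS) & OBJ_INDEX_MASK;
 */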

#define FULLNESS_BITS	2
#define CLASS_BITS	8
#define ISOLATED_BITS	3
#define MAGIC_VAL_BITS	8

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)

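/*
 * A worked example (an illustration, assuming 4K pages and the typical
 * 64-bit layout sketched above): ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16
 * bytes and ZS_MIN_ALLOC_SIZE = 32 bytes, so
 *
 *	ZS_SIZE_CLASSES = DIV_ROUND_UP(4096 - 32, 16) + 1 = 254 + 1 = 255
 *
 * which matches the "255 size classes" noted above; class chunk sizes
 * run 32, 48, 64, ..., 4096 in 16-byte steps.
 */
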
enum fullness_group {
	ZS_EMPTY,
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL,
	NR_ZS_FULLNESS,
};

enum zs_stat_type {
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,
};

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

#ifdef CONFIG_COMPACTION
static struct vfsmount *zsmalloc_mnt;
#endif

/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N * 3 / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N * 3 / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
static size_t huge_class_size;

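/*
 * A numeric sketch (illustrative, not from the original source): for a
 * class with N = objs_per_zspage = 8 and f = 4, N * 3 / f = 6, so a
 * zspage holding n objects is grouped as ZS_EMPTY for n == 0,
 * ZS_ALMOST_EMPTY for 1 <= n <= 6, ZS_ALMOST_FULL for n == 7 and
 * ZS_FULL for n == 8 (see get_fullness_group()).
 */
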
struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_ZS_FULLNESS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}

static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}

static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives the head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN.
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for a non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

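/*
 * Freelist encoding at a glance (an illustration; see init_zspage() and
 * obj_malloc() below): a free slot's link_free stores the index of the
 * next free object shifted left by OBJ_TAG_BITS, e.g.
 *
 *	link->next = next_free_idx << OBJ_TAG_BITS;
 *
 * while an allocated slot's header instead stores the handle with
 * OBJ_ALLOCATED_TAG set in its low bit, which is how obj_to_head()
 * tells the two apart.
 */
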
struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
#endif
};

struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int isolated:ISOLATED_BITS;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
#ifdef CONFIG_COMPACTION
	rwlock_t lock;
#endif
};

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

#ifdef CONFIG_COMPACTION
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static int zsmalloc_mount(void) { return 0; }
static void zsmalloc_unmount(void) {}
static int zs_register_migration(struct zs_pool *pool) { return 0; }
static void zs_unregister_migration(struct zs_pool *pool) {}
static void migrate_lock_init(struct zspage *zspage) {}
static void migrate_read_lock(struct zspage *zspage) {}
static void migrate_read_unlock(struct zspage *zspage) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif

static int create_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	if (!pool->handle_cachep)
		return 1;

	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					0, 0, NULL);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return 1;
	}

	return 0;
}

static void destroy_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
	kmem_cache_destroy(pool->zspage_cachep);
}

static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	return kmem_cache_alloc(pool->zspage_cachep,
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	/*
	 * The lsb of @obj represents the handle lock while the other bits
	 * represent the object value the handle points to, so updating
	 * it shouldn't do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
}

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp,
			     const struct zpool_ops *zpool_ops,
			     struct zpool *zpool)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}
static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

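/*
 * How a client such as zswap would reach this driver through the zpool
 * API (a hedged sketch, not taken from this file; "demo" is just an
 * example pool name and error handling is trimmed):
 *
 *	unsigned long handle;
 *	struct zpool *zp = zpool_create_pool("zsmalloc", "demo",
 *					     GFP_KERNEL, NULL);
 *
 *	if (zp && zpool_malloc(zp, 128, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memset(dst, 0, 128);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	if (zp)
 *		zpool_destroy_pool(zp);
 */
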
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static bool is_zspage_isolated(struct zspage *zspage)
{
	return zspage->isolated;
}

static __maybe_unused int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}

static inline void set_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse = val;
}

static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static inline struct page *get_first_page(struct zspage *zspage)
{
	struct page *first_page = zspage->first_page;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	return first_page;
}

static inline int get_first_obj_offset(struct page *page)
{
	return page->units;
}

static inline void set_first_obj_offset(struct page *page, int offset)
{
	page->units = offset;
}

static inline unsigned int get_freeobj(struct zspage *zspage)
{
	return zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
{
	zspage->freeobj = obj;
}

static void get_zspage_mapping(struct zspage *zspage,
				unsigned int *class_idx,
				enum fullness_group *fullness)
{
	BUG_ON(zspage->magic != ZSPAGE_MAGIC);

	*fullness = zspage->fullness;
	*class_idx = zspage->class;
}

static void set_zspage_mapping(struct zspage *zspage,
				unsigned int class_idx,
				enum fullness_group fullness)
{
	zspage->class = class_idx;
	zspage->fullness = fullness;
}

/*
 * zsmalloc divides the pool into various size classes; each class
 * maintains a list of zspages, and each zspage is divided into equal
 * sized chunks. Each allocation falls into one of these classes
 * depending on its size. This function returns the index of the size
 * class whose chunk size is big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}

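/*
 * For instance (an illustrative walk-through with 4K pages, where
 * ZS_MIN_ALLOC_SIZE = 32 and ZS_SIZE_CLASS_DELTA = 16): a request for
 * 100 bytes gives idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class
 * whose chunk size is 32 + 5 * 16 = 112 bytes, the smallest chunk that
 * still fits 100 bytes.
 */
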
/* type can be of enum type zs_stat_type or fullness_group */
static inline void zs_stat_inc(struct size_class *class,
				int type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline void zs_stat_dec(struct size_class *class,
				int type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline unsigned long zs_stat_get(struct size_class *class,
				int type)
{
	return class->stats.objs[type];
}

#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = class->objs_per_zspage;
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage, freeable);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages, "", total_freeable);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);

static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	struct dentry *entry;

	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stats_size_fops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		debugfs_remove_recursive(pool->stat_dentry);
		pool->stat_dentry = NULL;
	}
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif


/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns the fullness
 * status of the given zspage.
 */
static enum fullness_group get_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int inuse, objs_per_zspage;
	enum fullness_group fg;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == objs_per_zspage)
		fg = ZS_FULL;
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	struct zspage *head;

	zs_stat_inc(class, fullness, 1);
	head = list_first_entry_or_null(&class->fullness_list[fullness],
					struct zspage, list);
	/*
	 * We want to see more ZS_FULL pages and fewer almost empty/full.
	 * Put pages with higher ->inuse first.
	 */
	if (head) {
		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
			list_add(&zspage->list, &head->list);
			return;
		}
	}
	list_add(&zspage->list, &class->fullness_list[fullness]);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
	VM_BUG_ON(is_zspage_isolated(zspage));

	list_del_init(&zspage->list);
	zs_stat_dec(class, fullness, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the zspage can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given zspage and accordingly moves the
 * zspage from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int class_idx;
	enum fullness_group currfg, newfg;

	get_zspage_mapping(zspage, &class_idx, &currfg);
	newfg = get_fullness_group(class, zspage);
	if (newfg == currfg)
		goto out;

	if (!is_zspage_isolated(zspage)) {
		remove_zspage(class, zspage, currfg);
		insert_zspage(class, zspage, newfg);
	}

	set_zspage_mapping(zspage, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at the end of
 * each zspage, which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

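/*
 * Tracing the loop for the 3/8 * PAGE_SIZE example above (4K pages, so
 * class_size = 1536; an illustration, not part of the original source):
 *
 *	i = 1: waste =  4096 % 1536 = 1024, usedpc =  75
 *	i = 2: waste =  8192 % 1536 =  512, usedpc =  93
 *	i = 3: waste = 12288 % 1536 =    0, usedpc = 100
 *	i = 4: waste = 16384 % 1536 = 1024, usedpc =  93
 *
 * so 3 pages per zspage wins, giving exactly 8 objects per zspage.
 */
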
static struct zspage *get_zspage(struct page *page)
{
	struct zspage *zspage = (struct zspage *)page->private;

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
}

static struct page *get_next_page(struct page *page)
{
	if (unlikely(PageHugeObject(page)))
		return NULL;

	return page->freelist;
}

/**
 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
 * @obj: the encoded object value
 * @page: page the object resides in
 * @obj_idx: object index
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned int *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

/**
 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page the object resides in
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
	unsigned long obj;

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;
	obj <<= OBJ_TAG_BITS;

	return obj;
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct page *page, void *obj)
{
	if (unlikely(PageHugeObject(page))) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		return page->index;
	} else
		return *(unsigned long *)obj;
}

static inline int testpin_tag(unsigned long handle)
{
	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static inline int trypin_tag(unsigned long handle)
{
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void pin_tag(unsigned long handle)
{
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void unpin_tag(unsigned long handle)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void reset_page(struct page *page)
{
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_mapcount_reset(page);
	ClearPageHugeObject(page);
	page->freelist = NULL;
}

/*
 * To prevent zspage destruction during migration, zspage freeing should
 * hold the locks of all pages in the zspage.
 */
void lock_zspage(struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		lock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}

int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
					get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_page(zspage); cursor != fail; cursor =
					get_next_page(cursor))
		unlock_page(cursor);

	return 0;
}

static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	struct page *page, *next;
	enum fullness_group fg;
	unsigned int class_idx;

	get_zspage_mapping(zspage, &class_idx, &fg);

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(fg != ZS_EMPTY);

	next = page = get_first_page(zspage);
	do {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		next = get_next_page(page);
		reset_page(page);
		unlock_page(page);
		dec_zone_page_state(page, NR_ZSPAGES);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);

	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
}

static void free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage, ZS_EMPTY);
	__free_zspage(pool, class, zspage);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		void *vaddr;

		set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		if (next_page) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Reset OBJ_TAG_BITS bit in the last link to tell
			 * whether it is an allocated object or not.
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}

static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;
	int nr_pages = class->pages_per_zspage;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using page->freelist
	 * 2. each sub-page points to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		page->freelist = NULL;
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetPageHugeObject(page);
		} else {
			prev_page->freelist = page;
		}
		prev_page = page;
	}
}

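/*
 * The resulting chain, sketched for a 3-page zspage (an illustration of
 * the linkage created above):
 *
 *	zspage->first_page --> page0 (PG_private set)
 *	page0->freelist --> page1, page1->freelist --> page2,
 *	page2->freelist == NULL
 *	pageN->private --> zspage (for every component page)
 */
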
/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	memset(zspage, 0, sizeof(struct zspage));
	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
				__free_page(pages[i]);
			}
			cache_free_zspage(pool, zspage);
			return NULL;
		}

		inc_zone_page_state(page, NR_ZSPAGES);
		pages[i] = page;
	}

	create_page_chain(class, zspage, pages);
	init_zspage(class, zspage);

	return zspage;
}

static struct zspage *find_get_zspage(struct size_class *class)
{
	int i;
	struct zspage *zspage;

	for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
				struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_prepare(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	return __zs_cpu_up(area);
}

static int zs_cpu_dead(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	__zs_cpu_down(area);
	return 0;
}

static bool can_merge(struct size_class *prev, int pages_per_zspage,
					int objs_per_zspage)
{
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;

	return false;
}

static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/* migration cannot move any subpage in this zspage */
	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (likely(!PageHugeObject(page)))
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);

	migrate_read_unlock(zspage);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

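/*
 * Typical caller lifecycle (a hedged sketch of the exported API; error
 * handling trimmed, and "demo" is just an example pool name):
 *
 *	struct zs_pool *pool = zs_create_pool("demo");
 *	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);	/* unmap before sleeping */
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */
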
/**
 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
 *                        zsmalloc &size_class.
 * @pool: zsmalloc pool to use
 *
 * The function returns the size of the first huge class - any object of equal
 * or bigger size will be stored in zspage consisting of a single physical
 * page.
 *
 * Context: Any context.
 *
 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
 */
size_t zs_huge_class_size(struct zs_pool *pool)
{
	return huge_class_size;
}
EXPORT_SYMBOL_GPL(zs_huge_class_size);

static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_page, offset;
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(zspage);

	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset & ~PAGE_MASK;
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!PageHugeObject(m_page)))
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle to page->index */
		zspage->first_page->index = handle;

	kunmap_atomic(vaddr);
	mod_zspage_inuse(zspage, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	obj = location_to_obj(m_page, obj);

	return obj;
}


/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	enum fullness_group newfg;
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj = obj_malloc(class, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		record_obj(handle, obj);
		spin_unlock(&class->lock);

		return handle;
	}

	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return 0;
	}

	spin_lock(&class->lock);
	obj = obj_malloc(class, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
	record_obj(handle, obj);
	atomic_long_add(class->pages_per_zspage,
				&pool->pages_allocated);
	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);

	/* We completely set up the zspage, so mark it movable */
	SetZsPageMovable(pool, zspage);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

static void obj_free(struct size_class *class, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct page *f_page;
	unsigned long f_offset;
	unsigned int f_objidx;
	void *vaddr;

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
	zspage = get_zspage(f_page);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	kunmap_atomic(vaddr);
	set_freeobj(zspage, f_objidx);
	mod_zspage_inuse(zspage, -1);
	zs_stat_dec(class, OBJ_USED, 1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *f_page;
	unsigned long obj;
	unsigned int f_objidx;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;
	bool isolated;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	zspage = get_zspage(f_page);

	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(class, obj);
	fullness = fix_fullness_group(class, zspage);
	if (fullness != ZS_EMPTY) {
		migrate_read_unlock(zspage);
		goto out;
	}

	isolated = is_zspage_isolated(zspage);
	migrate_read_unlock(zspage);
	/* If zspage is isolated, zs_page_putback will free the zspage */
	if (likely(!isolated))
		free_zspage(pool, class, zspage);
out:
	spin_unlock(&class->lock);
	unpin_tag(handle);
	cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);

static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned int s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = (class->size * s_objidx) & ~PAGE_MASK;
	d_off = (class->size * d_objidx) & ~PAGE_MASK;

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find an allocated object in the zspage, starting from the given object
 * index, and return its handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
					struct page *page, int *obj_idx)
{
	unsigned long head;
	int offset = 0;
	int index = *obj_idx;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);

	*obj_idx = index;

	return handle;
}

struct zs_compact_control {
	/* Source page for migration which could be a subpage of zspage */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	/* Starting object index within @s_page which is used for live object
	 * in the subpage. */
	int obj_idx;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	int obj_idx = cc->obj_idx;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(class, get_zspage(d_page))) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(class, get_zspage(d_page), handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		/*
		 * record_obj updates the handle's value to free_obj and it
		 * will invalidate the lock bit (ie, HANDLE_PIN_BIT) of the
		 * handle, which breaks synchronization using pin_tag
		 * (e.g., zs_free), so let's keep the lock bit.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(class, used_obj);
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->obj_idx = obj_idx;

	return ret;
}

static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
	int i;
	struct zspage *zspage;
	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};

	if (!source) {
		fg[0] = ZS_ALMOST_FULL;
		fg[1] = ZS_ALMOST_EMPTY;
	}

	for (i = 0; i < 2; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
			VM_BUG_ON(is_zspage_isolated(zspage));
			remove_zspage(class, zspage, fg[i]);
			return zspage;
		}
	}

	return zspage;
}

/*
 * putback_zspage - add @zspage into the right class's fullness list
 * @class: destination class
 * @zspage: target zspage
 *
 * Return @zspage's fullness_group
 */
static enum fullness_group putback_zspage(struct size_class *class,
			struct zspage *zspage)
{
	enum fullness_group fullness;

	VM_BUG_ON(is_zspage_isolated(zspage));

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);

	return fullness;
}

#ifdef CONFIG_COMPACTION
static struct dentry *zs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
}

static struct file_system_type zsmalloc_fs = {
	.name		= "zsmalloc",
	.mount		= zs_mount,
	.kill_sb	= kill_anon_super,
};

static int zsmalloc_mount(void)
{
	int ret = 0;

	zsmalloc_mnt = kern_mount(&zsmalloc_fs);
	if (IS_ERR(zsmalloc_mnt))
		ret = PTR_ERR(zsmalloc_mnt);

	return ret;
}

static void zsmalloc_unmount(void)
{
	kern_unmount(zsmalloc_mnt);
}

static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage)
{
	read_unlock(&zspage->lock);
}

static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}

/* Number of isolated subpages for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated++;
}

static void dec_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated--;
}

static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(PageHugeObject(oldpage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, page_mapping(oldpage));
}

bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct address_space *mapping;

	/*
	 * Page is locked so zspage couldn't be destroyed. For details, look
	 * at lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);

	/*
	 * Without the class lock, fullness could be stale while class_idx is
	 * okay because class_idx is constant unless the page is freed, so we
	 * should get fullness again under the class lock.
	 */
	get_zspage_mapping(zspage, &class_idx, &fullness);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	if (get_zspage_inuse(zspage) == 0) {
		spin_unlock(&class->lock);
		return false;
	}

	/* zspage is isolated for object migration */
	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		spin_unlock(&class->lock);
		return false;
	}

	/*
	 * If this is the first time the zspage is isolated, isolate the zspage
	 * from the size_class to prevent further object allocation from it.
	 */
	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		get_zspage_mapping(zspage, &class_idx, &fullness);
		remove_zspage(class, zspage, fullness);
	}

	inc_zspage_isolation(zspage);
	spin_unlock(&class->lock);

	return true;
}

int zs_page_migrate(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	int offset, pos;
	unsigned long handle, head;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;
	int ret = -EAGAIN;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the zs lock, which does not work with
	 * MIGRATE_SYNC_NO_COPY workflow.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);

	/* Concurrent compactor cannot migrate any subpage in zspage */
	migrate_write_lock(zspage);
	get_zspage_mapping(zspage, &class_idx, &fullness);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];
	offset = get_first_obj_offset(page);

	spin_lock(&class->lock);
	if (!get_zspage_inuse(zspage)) {
		/*
		 * Set "offset" to the end of the page so that every loop
		 * below skips unnecessary object scanning.
		 */
		offset = PAGE_SIZE;
	}

	pos = offset;
	s_addr = kmap_atomic(page);
	while (pos < PAGE_SIZE) {
		head = obj_to_head(page, s_addr + pos);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!trypin_tag(handle))
				goto unpin_objects;
		}
		pos += class->size;
	}

	/*
	 * At this point no user can access any object in the zspage,
	 * so it is safe to move them.
	 */
	d_addr = kmap_atomic(newpage);
	memcpy(d_addr, s_addr, PAGE_SIZE);
	kunmap_atomic(d_addr);

	for (addr = s_addr + offset; addr < s_addr + pos;
					addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								obj_idx);
			new_obj |= BIT(HANDLE_PIN_BIT);
			record_obj(handle, new_obj);
		}
	}

	replace_sub_page(class, zspage, newpage, page);
	get_page(newpage);

	dec_zspage_isolation(zspage);

	/*
	 * Page migration is done, so let's put back the isolated zspage to
	 * the list if @page is the last isolated subpage in the zspage.
	 */
	if (!is_zspage_isolated(zspage))
		putback_zspage(class, zspage);

	reset_page(page);
	put_page(page);
	page = newpage;

	ret = MIGRATEPAGE_SUCCESS;
unpin_objects:
	for (addr = s_addr + offset; addr < s_addr + pos;
						addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();
			unpin_tag(handle);
		}
	}
	kunmap_atomic(s_addr);
	spin_unlock(&class->lock);
	migrate_write_unlock(zspage);

	return ret;
}

2078void zs_page_putback(struct page *page)
2079{
2080 struct zs_pool *pool;
2081 struct size_class *class;
2082 int class_idx;
2083 enum fullness_group fg;
2084 struct address_space *mapping;
2085 struct zspage *zspage;
2086
2087 VM_BUG_ON_PAGE(!PageMovable(page), page);
2088 VM_BUG_ON_PAGE(!PageIsolated(page), page);
2089
2090 zspage = get_zspage(page);
2091 get_zspage_mapping(zspage, &class_idx, &fg);
2092 mapping = page_mapping(page);
2093 pool = mapping->private_data;
2094 class = pool->size_class[class_idx];
2095
2096 spin_lock(&class->lock);
2097 dec_zspage_isolation(zspage);
2098 if (!is_zspage_isolated(zspage)) {
2099 fg = putback_zspage(class, zspage);
		/*
		 * Due to page_lock, we cannot free the zspage immediately,
		 * so defer the freeing.
		 */
2104 if (fg == ZS_EMPTY)
2105 schedule_work(&pool->free_work);
2106 }
2107 spin_unlock(&class->lock);
2108}
2109
2110const struct address_space_operations zsmalloc_aops = {
2111 .isolate_page = zs_page_isolate,
2112 .migratepage = zs_page_migrate,
2113 .putback_page = zs_page_putback,
2114};
2115
2116static int zs_register_migration(struct zs_pool *pool)
2117{
2118 pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
2119 if (IS_ERR(pool->inode)) {
2120 pool->inode = NULL;
2121 return 1;
2122 }
2123
2124 pool->inode->i_mapping->private_data = pool;
2125 pool->inode->i_mapping->a_ops = &zsmalloc_aops;
2126 return 0;
2127}
2128
2129static void zs_unregister_migration(struct zs_pool *pool)
2130{
2131 flush_work(&pool->free_work);
2132 iput(pool->inode);
2133}
2134
2135/*
2136 * Caller should hold page_lock of all pages in the zspage
2137 * In here, we cannot use zspage meta data.
2138 */
2139static void async_free_zspage(struct work_struct *work)
2140{
2141 int i;
2142 struct size_class *class;
2143 unsigned int class_idx;
2144 enum fullness_group fullness;
2145 struct zspage *zspage, *tmp;
2146 LIST_HEAD(free_pages);
2147 struct zs_pool *pool = container_of(work, struct zs_pool,
2148 free_work);
2149
2150 for (i = 0; i < ZS_SIZE_CLASSES; i++) {
2151 class = pool->size_class[i];
2152 if (class->index != i)
2153 continue;
2154
2155 spin_lock(&class->lock);
2156 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
2157 spin_unlock(&class->lock);
2158 }
2159
2160
2161 list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
2162 list_del(&zspage->list);
2163 lock_zspage(zspage);
2164
2165 get_zspage_mapping(zspage, &class_idx, &fullness);
2166 VM_BUG_ON(fullness != ZS_EMPTY);
2167 class = pool->size_class[class_idx];
2168 spin_lock(&class->lock);
2169 __free_zspage(pool, pool->size_class[class_idx], zspage);
2170 spin_unlock(&class->lock);
2171 }
}
2173
2174static void kick_deferred_free(struct zs_pool *pool)
2175{
2176 schedule_work(&pool->free_work);
2177}
2178
2179static void init_deferred_free(struct zs_pool *pool)
2180{
2181 INIT_WORK(&pool->free_work, async_free_zspage);
2182}
2183
2184static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
2185{
2186 struct page *page = get_first_page(zspage);
2187
2188 do {
2189 WARN_ON(!trylock_page(page));
2190 __SetPageMovable(page, pool->inode->i_mapping);
2191 unlock_page(page);
2192 } while ((page = get_next_page(page)) != NULL);
2193}
2194#endif
2195
2196/*
2197 *
2198 * Based on the number of unused allocated objects calculate
2199 * and return the number of pages that we can free.
2200 */
2201static unsigned long zs_can_compact(struct size_class *class)
2202{
2203 unsigned long obj_wasted;
2204 unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
2205 unsigned long obj_used = zs_stat_get(class, OBJ_USED);
2206
2207 if (obj_allocated <= obj_used)
2208 return 0;
2209
2210 obj_wasted = obj_allocated - obj_used;
2211 obj_wasted /= class->objs_per_zspage;
2212
2213 return obj_wasted * class->pages_per_zspage;
2214}
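
/*
 * Worked example (illustrative values, not taken from a real pool): for a
 * size_class with objs_per_zspage == 5 and pages_per_zspage == 4, suppose
 * OBJ_ALLOCATED == 20 and OBJ_USED == 12. Then obj_wasted = 8 / 5 = 1, so
 * compaction can free at most 1 * 4 = 4 physical pages for this class.
 */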
2215
2216static void __zs_compact(struct zs_pool *pool, struct size_class *class)
2217{
2218 struct zs_compact_control cc;
2219 struct zspage *src_zspage;
2220 struct zspage *dst_zspage = NULL;
2221
2222 spin_lock(&class->lock);
2223 while ((src_zspage = isolate_zspage(class, true))) {
2224
2225 if (!zs_can_compact(class))
2226 break;
2227
2228 cc.obj_idx = 0;
2229 cc.s_page = get_first_page(src_zspage);
2230
2231 while ((dst_zspage = isolate_zspage(class, false))) {
2232 cc.d_page = get_first_page(dst_zspage);
			/*
			 * If there is no more space in dst_page, resched
			 * and see if anyone has allocated another zspage.
			 */
2237 if (!migrate_zspage(pool, class, &cc))
2238 break;
2239
2240 putback_zspage(class, dst_zspage);
2241 }
2242
		/* Stop if we couldn't find a slot */
2244 if (dst_zspage == NULL)
2245 break;
2246
2247 putback_zspage(class, dst_zspage);
2248 if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
2249 free_zspage(pool, class, src_zspage);
2250 pool->stats.pages_compacted += class->pages_per_zspage;
2251 }
2252 spin_unlock(&class->lock);
2253 cond_resched();
2254 spin_lock(&class->lock);
2255 }
2256
2257 if (src_zspage)
2258 putback_zspage(class, src_zspage);
2259
2260 spin_unlock(&class->lock);
2261}
2262
2263unsigned long zs_compact(struct zs_pool *pool)
2264{
2265 int i;
2266 struct size_class *class;
2267
2268 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2269 class = pool->size_class[i];
2270 if (!class)
2271 continue;
2272 if (class->index != i)
2273 continue;
2274 __zs_compact(pool, class);
2275 }
2276
2277 return pool->stats.pages_compacted;
2278}
2279EXPORT_SYMBOL_GPL(zs_compact);
2280
2281void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2282{
2283 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
2284}
2285EXPORT_SYMBOL_GPL(zs_pool_stats);
2286
2287static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
2288 struct shrink_control *sc)
2289{
2290 unsigned long pages_freed;
2291 struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2292 shrinker);
2293
2294 pages_freed = pool->stats.pages_compacted;
2295 /*
2296 * Compact classes and calculate compaction delta.
2297 * Can run concurrently with a manually triggered
2298 * (by user) compaction.
2299 */
2300 pages_freed = zs_compact(pool) - pages_freed;
2301
2302 return pages_freed ? pages_freed : SHRINK_STOP;
2303}
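
/*
 * Worked example (illustrative): pool->stats.pages_compacted is a
 * monotonically increasing counter, so if it was 100 before this call
 * and zs_compact() returns 130, the shrinker reports 30 pages freed by
 * this scan.
 */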
2304
2305static unsigned long zs_shrinker_count(struct shrinker *shrinker,
2306 struct shrink_control *sc)
2307{
2308 int i;
2309 struct size_class *class;
2310 unsigned long pages_to_free = 0;
2311 struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2312 shrinker);
2313
2314 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2315 class = pool->size_class[i];
2316 if (!class)
2317 continue;
2318 if (class->index != i)
2319 continue;
2320
2321 pages_to_free += zs_can_compact(class);
2322 }
2323
2324 return pages_to_free;
2325}
2326
2327static void zs_unregister_shrinker(struct zs_pool *pool)
2328{
2329 unregister_shrinker(&pool->shrinker);
2330}
2331
2332static int zs_register_shrinker(struct zs_pool *pool)
2333{
2334 pool->shrinker.scan_objects = zs_shrinker_scan;
2335 pool->shrinker.count_objects = zs_shrinker_count;
2336 pool->shrinker.batch = 0;
2337 pool->shrinker.seeks = DEFAULT_SEEKS;
2338
2339 return register_shrinker(&pool->shrinker);
2340}
2341
2342/**
2343 * zs_create_pool - Creates an allocation pool to work from.
2344 * @name: pool name to be created
2345 *
 * This function must be called before anything else when using
2347 * the zsmalloc allocator.
2348 *
2349 * On success, a pointer to the newly created pool is returned,
2350 * otherwise NULL.
2351 */
2352struct zs_pool *zs_create_pool(const char *name)
2353{
2354 int i;
2355 struct zs_pool *pool;
2356 struct size_class *prev_class = NULL;
2357
2358 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2359 if (!pool)
2360 return NULL;
2361
2362 init_deferred_free(pool);
2363
2364 pool->name = kstrdup(name, GFP_KERNEL);
2365 if (!pool->name)
2366 goto err;
2367
2368 if (create_cache(pool))
2369 goto err;
2370
	/*
	 * Iterate in reverse, because the size of a size_class that we want
	 * to merge into must be larger than or equal to the current size.
	 */
2375 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2376 int size;
2377 int pages_per_zspage;
2378 int objs_per_zspage;
2379 struct size_class *class;
2380 int fullness = 0;
2381
2382 size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
2383 if (size > ZS_MAX_ALLOC_SIZE)
2384 size = ZS_MAX_ALLOC_SIZE;
2385 pages_per_zspage = get_pages_per_zspage(size);
2386 objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
2387
		/*
		 * We iterate from the biggest down to the smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
2394 if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
2395 !huge_class_size) {
2396 huge_class_size = size;
2397 /*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds the handle size before it
			 * performs the size class search - so an object may
			 * be smaller than the huge class size, yet still end
			 * up in the huge class, because it grows by
			 * ZS_HANDLE_SIZE extra bytes right before the class
			 * lookup.
2405 */
2406 huge_class_size -= (ZS_HANDLE_SIZE - 1);
2407 }
2408
2409 /*
		 * A size_class is used for normal zsmalloc operations such
		 * as alloc/free for that size. Although it is natural to
		 * have one size_class for each size, we can get better
		 * memory utilization if one size_class serves many different
		 * sizes that share the same characteristics. So we make a
		 * size_class point to the previous size_class when possible.
2417 */
2418 if (prev_class) {
2419 if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
2420 pool->size_class[i] = prev_class;
2421 continue;
2422 }
2423 }
2424
2425 class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
2426 if (!class)
2427 goto err;
2428
2429 class->size = size;
2430 class->index = i;
2431 class->pages_per_zspage = pages_per_zspage;
2432 class->objs_per_zspage = objs_per_zspage;
2433 spin_lock_init(&class->lock);
2434 pool->size_class[i] = class;
2435 for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
2436 fullness++)
2437 INIT_LIST_HEAD(&class->fullness_list[fullness]);
2438
2439 prev_class = class;
2440 }
2441
2442 /* debug only, don't abort if it fails */
2443 zs_pool_stat_create(pool, name);
2444
2445 if (zs_register_migration(pool))
2446 goto err;
2447
2448 /*
	 * Not critical since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is a fairly optional feature.
	 * If registration fails we can still use the pool normally and the
	 * user can trigger compaction manually. Thus, ignore the return code.
2453 */
2454 zs_register_shrinker(pool);
2455
2456 return pool;
2457
2458err:
2459 zs_destroy_pool(pool);
2460 return NULL;
2461}
2462EXPORT_SYMBOL_GPL(zs_create_pool);
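
/*
 * Illustrative usage sketch (not part of the allocator): a minimal
 * create/alloc/map/free round trip against the API above. The pool name,
 * the 128-byte size and the GFP_KERNEL mask are arbitrary example values;
 * in this version zs_malloc() returns 0 on failure.
 */
static int __maybe_unused zs_usage_example(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *obj;

	pool = zs_create_pool("example");
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 128, GFP_KERNEL);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* An object must be mapped before use and unmapped afterwards. */
	obj = zs_map_object(pool, handle, ZS_MM_WO);
	memset(obj, 0, 128);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}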
2463
2464void zs_destroy_pool(struct zs_pool *pool)
2465{
2466 int i;
2467
2468 zs_unregister_shrinker(pool);
2469 zs_unregister_migration(pool);
2470 zs_pool_stat_destroy(pool);
2471
2472 for (i = 0; i < ZS_SIZE_CLASSES; i++) {
2473 int fg;
2474 struct size_class *class = pool->size_class[i];
2475
2476 if (!class)
2477 continue;
2478
2479 if (class->index != i)
2480 continue;
2481
2482 for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
2483 if (!list_empty(&class->fullness_list[fg])) {
2484 pr_info("Freeing non-empty class with size %db, fullness group %d\n",
2485 class->size, fg);
2486 }
2487 }
2488 kfree(class);
2489 }
2490
2491 destroy_cache(pool);
2492 kfree(pool->name);
2493 kfree(pool);
2494}
2495EXPORT_SYMBOL_GPL(zs_destroy_pool);
2496
2497static int __init zs_init(void)
2498{
2499 int ret;
2500
2501 ret = zsmalloc_mount();
2502 if (ret)
2503 goto out;
2504
2505 ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
2506 zs_cpu_prepare, zs_cpu_dead);
2507 if (ret)
2508 goto hp_setup_fail;
2509
2510#ifdef CONFIG_ZPOOL
2511 zpool_register_driver(&zs_zpool_driver);
2512#endif
2513
2514 zs_stat_init();
2515
2516 return 0;
2517
2518hp_setup_fail:
2519 zsmalloc_unmount();
2520out:
2521 return ret;
2522}
2523
2524static void __exit zs_exit(void)
2525{
2526#ifdef CONFIG_ZPOOL
2527 zpool_unregister_driver(&zs_zpool_driver);
2528#endif
2529 zsmalloc_unmount();
2530 cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
2531
2532 zs_stat_exit();
2533}
2534
2535module_init(zs_init);
2536module_exit(zs_exit);
2537
2538MODULE_LICENSE("Dual BSD/GPL");
2539MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1/*
2 * zsmalloc memory allocator
3 *
4 * Copyright (C) 2011 Nitin Gupta
5 * Copyright (C) 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the license that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 */
13
14/*
15 * Following is how we use various fields and flags of underlying
16 * struct page(s) to form a zspage.
17 *
18 * Usage of struct page fields:
19 * page->private: points to zspage
20 * page->index: links together all component pages of a zspage
21 * For the huge page, this is always 0, so we use this field
22 * to store handle.
23 * page->page_type: first object offset in a subpage of zspage
24 *
25 * Usage of struct page flags:
26 * PG_private: identifies the first component page
27 * PG_owner_priv_1: identifies the huge component page
28 *
29 */
30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33/*
34 * lock ordering:
35 * page_lock
36 * pool->lock
37 * zspage->lock
38 */
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/sched.h>
43#include <linux/bitops.h>
44#include <linux/errno.h>
45#include <linux/highmem.h>
46#include <linux/string.h>
47#include <linux/slab.h>
48#include <linux/pgtable.h>
49#include <asm/tlbflush.h>
50#include <linux/cpumask.h>
51#include <linux/cpu.h>
52#include <linux/vmalloc.h>
53#include <linux/preempt.h>
54#include <linux/spinlock.h>
55#include <linux/shrinker.h>
56#include <linux/types.h>
57#include <linux/debugfs.h>
58#include <linux/zsmalloc.h>
59#include <linux/zpool.h>
60#include <linux/migrate.h>
61#include <linux/wait.h>
62#include <linux/pagemap.h>
63#include <linux/fs.h>
64#include <linux/local_lock.h>
65
66#define ZSPAGE_MAGIC 0x58
67
68/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
70 * These two conditions ensure that any 'struct link_free' itself doesn't
71 * span more than 1 page which avoids complex case of mapping 2 pages simply
72 * to restore link_free pointer values.
73 */
74#define ZS_ALIGN 8
75
76#define ZS_HANDLE_SIZE (sizeof(unsigned long))
77
78/*
79 * Object location (<PFN>, <obj_idx>) is encoded as
80 * a single (unsigned long) handle value.
81 *
82 * Note that object index <obj_idx> starts from 0.
83 *
84 * This is made more complicated by various memory models and PAE.
85 */
86
87#ifndef MAX_POSSIBLE_PHYSMEM_BITS
88#ifdef MAX_PHYSMEM_BITS
89#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
90#else
91/*
92 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
93 * be PAGE_SHIFT
94 */
95#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
96#endif
97#endif
98
99#define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
100
101/*
 * The head of an allocated object should carry OBJ_ALLOCATED_TAG
 * to identify whether the object is allocated or not.
 * It's okay to keep the status in the least significant bit because
 * the header stores the handle, which is a 4-byte-aligned address,
 * so we have room for at least two bits.
107 */
108#define OBJ_ALLOCATED_TAG 1
109
110#define OBJ_TAG_BITS 1
111#define OBJ_TAG_MASK OBJ_ALLOCATED_TAG
112
113#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
114#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
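
/*
 * Worked example (assuming a 64-bit system with 4K pages and
 * MAX_POSSIBLE_PHYSMEM_BITS == 46): _PFN_BITS == 46 - 12 == 34, so
 * OBJ_INDEX_BITS == 64 - 34 - 1 == 29. An encoded object value then has
 * the layout <PFN:34 bits><obj_idx:29 bits><tag:1 bit>, with the
 * OBJ_ALLOCATED_TAG bit in bit 0.
 */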
115
116#define HUGE_BITS 1
117#define FULLNESS_BITS 4
118#define CLASS_BITS 8
119#define ISOLATED_BITS 5
120#define MAGIC_VAL_BITS 8
121
122#define MAX(a, b) ((a) >= (b) ? (a) : (b))
123
124#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))
125
126/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
127#define ZS_MIN_ALLOC_SIZE \
128 MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
129/* each chunk includes extra space to keep handle */
130#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
131
132/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - A large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - A small number of size classes causes large internal fragmentation
 *  - It is probably better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiples of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN
 * (reason above)
144 */
145#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
146#define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
147 ZS_SIZE_CLASS_DELTA) + 1)
148
149/*
150 * Pages are distinguished by the ratio of used memory (that is the ratio
151 * of ->inuse objects to all objects that page can store). For example,
152 * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
153 *
154 * The number of fullness groups is not random. It allows us to keep
155 * difference between the least busy page in the group (minimum permitted
156 * number of ->inuse objects) and the most busy page (maximum permitted
157 * number of ->inuse objects) at a reasonable value.
158 */
159enum fullness_group {
160 ZS_INUSE_RATIO_0,
161 ZS_INUSE_RATIO_10,
162 /* NOTE: 8 more fullness groups here */
163 ZS_INUSE_RATIO_99 = 10,
164 ZS_INUSE_RATIO_100,
165 NR_FULLNESS_GROUPS,
166};
167
168enum class_stat_type {
169 /* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
170 ZS_OBJS_ALLOCATED = NR_FULLNESS_GROUPS,
171 ZS_OBJS_INUSE,
172 NR_CLASS_STAT_TYPES,
173};
174
175struct zs_size_stat {
176 unsigned long objs[NR_CLASS_STAT_TYPES];
177};
178
179#ifdef CONFIG_ZSMALLOC_STAT
180static struct dentry *zs_stat_root;
181#endif
182
183static size_t huge_class_size;
184
185struct size_class {
186 struct list_head fullness_list[NR_FULLNESS_GROUPS];
187 /*
188 * Size of objects stored in this class. Must be multiple
189 * of ZS_ALIGN.
190 */
191 int size;
192 int objs_per_zspage;
193 /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
194 int pages_per_zspage;
195
196 unsigned int index;
197 struct zs_size_stat stats;
198};
199
200/*
201 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives the head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN
205 */
206struct link_free {
207 union {
208 /*
209 * Free object index;
		 * It's valid for a non-allocated object
211 */
212 unsigned long next;
213 /*
214 * Handle of allocated object.
215 */
216 unsigned long handle;
217 };
218};
219
220struct zs_pool {
221 const char *name;
222
223 struct size_class *size_class[ZS_SIZE_CLASSES];
224 struct kmem_cache *handle_cachep;
225 struct kmem_cache *zspage_cachep;
226
227 atomic_long_t pages_allocated;
228
229 struct zs_pool_stats stats;
230
231 /* Compact classes */
232 struct shrinker *shrinker;
233
234#ifdef CONFIG_ZSMALLOC_STAT
235 struct dentry *stat_dentry;
236#endif
237#ifdef CONFIG_COMPACTION
238 struct work_struct free_work;
239#endif
240 spinlock_t lock;
241 atomic_t compaction_in_progress;
242};
243
244struct zspage {
245 struct {
246 unsigned int huge:HUGE_BITS;
247 unsigned int fullness:FULLNESS_BITS;
248 unsigned int class:CLASS_BITS + 1;
249 unsigned int isolated:ISOLATED_BITS;
250 unsigned int magic:MAGIC_VAL_BITS;
251 };
252 unsigned int inuse;
253 unsigned int freeobj;
254 struct page *first_page;
255 struct list_head list; /* fullness list */
256 struct zs_pool *pool;
257 rwlock_t lock;
258};
259
260struct mapping_area {
261 local_lock_t lock;
262 char *vm_buf; /* copy buffer for objects that span pages */
263 char *vm_addr; /* address of kmap_atomic()'ed pages */
264 enum zs_mapmode vm_mm; /* mapping mode */
265};
266
267/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
268static void SetZsHugePage(struct zspage *zspage)
269{
270 zspage->huge = 1;
271}
272
273static bool ZsHugePage(struct zspage *zspage)
274{
275 return zspage->huge;
276}
277
278static void migrate_lock_init(struct zspage *zspage);
279static void migrate_read_lock(struct zspage *zspage);
280static void migrate_read_unlock(struct zspage *zspage);
281
282#ifdef CONFIG_COMPACTION
283static void migrate_write_lock(struct zspage *zspage);
284static void migrate_write_lock_nested(struct zspage *zspage);
285static void migrate_write_unlock(struct zspage *zspage);
286static void kick_deferred_free(struct zs_pool *pool);
287static void init_deferred_free(struct zs_pool *pool);
288static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
289#else
290static void migrate_write_lock(struct zspage *zspage) {}
291static void migrate_write_lock_nested(struct zspage *zspage) {}
292static void migrate_write_unlock(struct zspage *zspage) {}
293static void kick_deferred_free(struct zs_pool *pool) {}
294static void init_deferred_free(struct zs_pool *pool) {}
295static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
296#endif
297
298static int create_cache(struct zs_pool *pool)
299{
300 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
301 0, 0, NULL);
302 if (!pool->handle_cachep)
303 return 1;
304
305 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
306 0, 0, NULL);
307 if (!pool->zspage_cachep) {
308 kmem_cache_destroy(pool->handle_cachep);
309 pool->handle_cachep = NULL;
310 return 1;
311 }
312
313 return 0;
314}
315
316static void destroy_cache(struct zs_pool *pool)
317{
318 kmem_cache_destroy(pool->handle_cachep);
319 kmem_cache_destroy(pool->zspage_cachep);
320}
321
322static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
323{
324 return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
325 gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
326}
327
328static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
329{
330 kmem_cache_free(pool->handle_cachep, (void *)handle);
331}
332
333static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
334{
335 return kmem_cache_zalloc(pool->zspage_cachep,
336 flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
337}
338
339static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
340{
341 kmem_cache_free(pool->zspage_cachep, zspage);
342}
343
/* pool->lock (which owns the handle) synchronizes races */
345static void record_obj(unsigned long handle, unsigned long obj)
346{
347 *(unsigned long *)handle = obj;
348}
349
350/* zpool driver */
351
352#ifdef CONFIG_ZPOOL
353
354static void *zs_zpool_create(const char *name, gfp_t gfp)
355{
356 /*
357 * Ignore global gfp flags: zs_malloc() may be invoked from
358 * different contexts and its caller must provide a valid
359 * gfp mask.
360 */
361 return zs_create_pool(name);
362}
363
364static void zs_zpool_destroy(void *pool)
365{
366 zs_destroy_pool(pool);
367}
368
369static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
370 unsigned long *handle)
371{
372 *handle = zs_malloc(pool, size, gfp);
373
374 if (IS_ERR_VALUE(*handle))
375 return PTR_ERR((void *)*handle);
376 return 0;
377}
378static void zs_zpool_free(void *pool, unsigned long handle)
379{
380 zs_free(pool, handle);
381}
382
383static void *zs_zpool_map(void *pool, unsigned long handle,
384 enum zpool_mapmode mm)
385{
386 enum zs_mapmode zs_mm;
387
388 switch (mm) {
389 case ZPOOL_MM_RO:
390 zs_mm = ZS_MM_RO;
391 break;
392 case ZPOOL_MM_WO:
393 zs_mm = ZS_MM_WO;
394 break;
395 case ZPOOL_MM_RW:
396 default:
397 zs_mm = ZS_MM_RW;
398 break;
399 }
400
401 return zs_map_object(pool, handle, zs_mm);
402}
403static void zs_zpool_unmap(void *pool, unsigned long handle)
404{
405 zs_unmap_object(pool, handle);
406}
407
408static u64 zs_zpool_total_size(void *pool)
409{
410 return zs_get_total_pages(pool) << PAGE_SHIFT;
411}
412
413static struct zpool_driver zs_zpool_driver = {
414 .type = "zsmalloc",
415 .owner = THIS_MODULE,
416 .create = zs_zpool_create,
417 .destroy = zs_zpool_destroy,
418 .malloc_support_movable = true,
419 .malloc = zs_zpool_malloc,
420 .free = zs_zpool_free,
421 .map = zs_zpool_map,
422 .unmap = zs_zpool_unmap,
423 .total_size = zs_zpool_total_size,
424};
425
426MODULE_ALIAS("zpool-zsmalloc");
427#endif /* CONFIG_ZPOOL */
428
429/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
430static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
431 .lock = INIT_LOCAL_LOCK(lock),
432};
433
434static __maybe_unused int is_first_page(struct page *page)
435{
436 return PagePrivate(page);
437}
438
439/* Protected by pool->lock */
440static inline int get_zspage_inuse(struct zspage *zspage)
441{
442 return zspage->inuse;
443}
444
445
446static inline void mod_zspage_inuse(struct zspage *zspage, int val)
447{
448 zspage->inuse += val;
449}
450
451static inline struct page *get_first_page(struct zspage *zspage)
452{
453 struct page *first_page = zspage->first_page;
454
455 VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
456 return first_page;
457}
458
459static inline unsigned int get_first_obj_offset(struct page *page)
460{
461 return page->page_type;
462}
463
464static inline void set_first_obj_offset(struct page *page, unsigned int offset)
465{
466 page->page_type = offset;
467}
468
469static inline unsigned int get_freeobj(struct zspage *zspage)
470{
471 return zspage->freeobj;
472}
473
474static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
475{
476 zspage->freeobj = obj;
477}
478
479static void get_zspage_mapping(struct zspage *zspage,
480 unsigned int *class_idx,
481 int *fullness)
482{
483 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
484
485 *fullness = zspage->fullness;
486 *class_idx = zspage->class;
487}
488
489static struct size_class *zspage_class(struct zs_pool *pool,
490 struct zspage *zspage)
491{
492 return pool->size_class[zspage->class];
493}
494
495static void set_zspage_mapping(struct zspage *zspage,
496 unsigned int class_idx,
497 int fullness)
498{
499 zspage->class = class_idx;
500 zspage->fullness = fullness;
501}
502
503/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages, and each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns the index of
 * the size class whose chunk size is big enough to hold the given size.
509 */
510static int get_size_class_index(int size)
511{
512 int idx = 0;
513
514 if (likely(size > ZS_MIN_ALLOC_SIZE))
515 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
516 ZS_SIZE_CLASS_DELTA);
517
518 return min_t(int, ZS_SIZE_CLASSES - 1, idx);
519}
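
/*
 * Worked example (assuming 4K pages, ZS_MIN_ALLOC_SIZE == 32 and
 * CLASS_BITS == 8, so ZS_SIZE_CLASS_DELTA == 4096 >> 8 == 16): a request
 * for 330 bytes yields idx = DIV_ROUND_UP(330 - 32, 16) = 19, i.e. the
 * class serving chunks of 32 + 19 * 16 == 336 bytes.
 */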
520
521static inline void class_stat_inc(struct size_class *class,
522 int type, unsigned long cnt)
523{
524 class->stats.objs[type] += cnt;
525}
526
527static inline void class_stat_dec(struct size_class *class,
528 int type, unsigned long cnt)
529{
530 class->stats.objs[type] -= cnt;
531}
532
533static inline unsigned long zs_stat_get(struct size_class *class, int type)
534{
535 return class->stats.objs[type];
536}
537
538#ifdef CONFIG_ZSMALLOC_STAT
539
540static void __init zs_stat_init(void)
541{
542 if (!debugfs_initialized()) {
543 pr_warn("debugfs not available, stat dir not created\n");
544 return;
545 }
546
547 zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
548}
549
550static void __exit zs_stat_exit(void)
551{
552 debugfs_remove_recursive(zs_stat_root);
553}
554
555static unsigned long zs_can_compact(struct size_class *class);
556
557static int zs_stats_size_show(struct seq_file *s, void *v)
558{
559 int i, fg;
560 struct zs_pool *pool = s->private;
561 struct size_class *class;
562 int objs_per_zspage;
563 unsigned long obj_allocated, obj_used, pages_used, freeable;
564 unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
565 unsigned long total_freeable = 0;
566 unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, };
567
568 seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n",
569 "class", "size", "10%", "20%", "30%", "40%",
570 "50%", "60%", "70%", "80%", "90%", "99%", "100%",
571 "obj_allocated", "obj_used", "pages_used",
572 "pages_per_zspage", "freeable");
573
574 for (i = 0; i < ZS_SIZE_CLASSES; i++) {
575
576 class = pool->size_class[i];
577
578 if (class->index != i)
579 continue;
580
581 spin_lock(&pool->lock);
582
583 seq_printf(s, " %5u %5u ", i, class->size);
584 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
585 inuse_totals[fg] += zs_stat_get(class, fg);
586 seq_printf(s, "%9lu ", zs_stat_get(class, fg));
587 }
588
589 obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
590 obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
591 freeable = zs_can_compact(class);
592 spin_unlock(&pool->lock);
593
594 objs_per_zspage = class->objs_per_zspage;
595 pages_used = obj_allocated / objs_per_zspage *
596 class->pages_per_zspage;
597
598 seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n",
599 obj_allocated, obj_used, pages_used,
600 class->pages_per_zspage, freeable);
601
602 total_objs += obj_allocated;
603 total_used_objs += obj_used;
604 total_pages += pages_used;
605 total_freeable += freeable;
606 }
607
608 seq_puts(s, "\n");
609 seq_printf(s, " %5s %5s ", "Total", "");
610
611 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++)
612 seq_printf(s, "%9lu ", inuse_totals[fg]);
613
614 seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n",
615 total_objs, total_used_objs, total_pages, "",
616 total_freeable);
617
618 return 0;
619}
620DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
621
622static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
623{
624 if (!zs_stat_root) {
625 pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
626 return;
627 }
628
629 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
630
631 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
632 &zs_stats_size_fops);
633}
634
635static void zs_pool_stat_destroy(struct zs_pool *pool)
636{
637 debugfs_remove_recursive(pool->stat_dentry);
638}
639
640#else /* CONFIG_ZSMALLOC_STAT */
641static void __init zs_stat_init(void)
642{
643}
644
645static void __exit zs_stat_exit(void)
646{
647}
648
649static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
650{
651}
652
653static inline void zs_pool_stat_destroy(struct zs_pool *pool)
654{
655}
656#endif
657
658
659/*
660 * For each size class, zspages are divided into different groups
 * depending on their usage ratio. This function returns the fullness
 * status of the given zspage.
663 */
664static int get_fullness_group(struct size_class *class, struct zspage *zspage)
665{
666 int inuse, objs_per_zspage, ratio;
667
668 inuse = get_zspage_inuse(zspage);
669 objs_per_zspage = class->objs_per_zspage;
670
671 if (inuse == 0)
672 return ZS_INUSE_RATIO_0;
673 if (inuse == objs_per_zspage)
674 return ZS_INUSE_RATIO_100;
675
676 ratio = 100 * inuse / objs_per_zspage;
677 /*
	 * Take integer division into consideration: a page with one inuse
	 * object out of 127 possible will end up with a usage ratio of 0,
	 * which is wrong, as it belongs in the ZS_INUSE_RATIO_10 fullness group.
681 */
682 return ratio / 10 + 1;
683}
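
/*
 * Worked example (illustrative): with objs_per_zspage == 127 and
 * inuse == 64, ratio == 100 * 64 / 127 == 50, so the zspage lands in
 * group 50 / 10 + 1 == 6, i.e. "> 50% and <= 60%" in use.
 */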
684
685/*
686 * Each size class maintains various freelists and zspages are assigned
687 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
689 * identified by <class, fullness_group>.
690 */
691static void insert_zspage(struct size_class *class,
692 struct zspage *zspage,
693 int fullness)
694{
695 class_stat_inc(class, fullness, 1);
696 list_add(&zspage->list, &class->fullness_list[fullness]);
697}
698
699/*
700 * This function removes the given zspage from the freelist identified
701 * by <class, fullness_group>.
702 */
703static void remove_zspage(struct size_class *class,
704 struct zspage *zspage,
705 int fullness)
706{
707 VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
708
709 list_del_init(&zspage->list);
710 class_stat_dec(class, fullness, 1);
711}
712
713/*
714 * Each size class maintains zspages in different fullness groups depending
715 * on the number of live objects they contain. When allocating or freeing
716 * objects, the fullness status of the page can change, for instance, from
717 * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function
718 * checks if such a status change has occurred for the given page and
719 * accordingly moves the page from the list of the old fullness group to that
720 * of the new fullness group.
721 */
722static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
723{
724 int class_idx;
725 int currfg, newfg;
726
727 get_zspage_mapping(zspage, &class_idx, &currfg);
728 newfg = get_fullness_group(class, zspage);
729 if (newfg == currfg)
730 goto out;
731
732 remove_zspage(class, zspage, currfg);
733 insert_zspage(class, zspage, newfg);
734 set_zspage_mapping(zspage, class_idx, newfg);
735out:
736 return newfg;
737}
738
739static struct zspage *get_zspage(struct page *page)
740{
741 struct zspage *zspage = (struct zspage *)page_private(page);
742
743 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
744 return zspage;
745}
746
747static struct page *get_next_page(struct page *page)
748{
749 struct zspage *zspage = get_zspage(page);
750
751 if (unlikely(ZsHugePage(zspage)))
752 return NULL;
753
754 return (struct page *)page->index;
755}
756
757/**
758 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
759 * @obj: the encoded object value
760 * @page: page object resides in zspage
761 * @obj_idx: object index
762 */
763static void obj_to_location(unsigned long obj, struct page **page,
764 unsigned int *obj_idx)
765{
766 obj >>= OBJ_TAG_BITS;
767 *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
768 *obj_idx = (obj & OBJ_INDEX_MASK);
769}
770
771static void obj_to_page(unsigned long obj, struct page **page)
772{
773 obj >>= OBJ_TAG_BITS;
774 *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
775}
776
777/**
778 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
779 * @page: page object resides in zspage
780 * @obj_idx: object index
781 */
782static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
783{
784 unsigned long obj;
785
786 obj = page_to_pfn(page) << OBJ_INDEX_BITS;
787 obj |= obj_idx & OBJ_INDEX_MASK;
788 obj <<= OBJ_TAG_BITS;
789
790 return obj;
791}
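
/*
 * Worked example (illustrative, with OBJ_INDEX_BITS == 29 as in the
 * 64-bit layout sketched earlier): for a page at PFN 0x1234 and
 * obj_idx 5, obj == ((0x1234UL << 29) | 5) << 1. obj_to_location()
 * reverses this exactly: shifting right by OBJ_TAG_BITS and masking
 * with OBJ_INDEX_MASK recovers 5, and the remaining high bits give
 * back the PFN.
 */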
792
793static unsigned long handle_to_obj(unsigned long handle)
794{
795 return *(unsigned long *)handle;
796}
797
798static inline bool obj_allocated(struct page *page, void *obj,
799 unsigned long *phandle)
800{
801 unsigned long handle;
802 struct zspage *zspage = get_zspage(page);
803
804 if (unlikely(ZsHugePage(zspage))) {
805 VM_BUG_ON_PAGE(!is_first_page(page), page);
806 handle = page->index;
807 } else
808 handle = *(unsigned long *)obj;
809
810 if (!(handle & OBJ_ALLOCATED_TAG))
811 return false;
812
813 /* Clear all tags before returning the handle */
814 *phandle = handle & ~OBJ_TAG_MASK;
815 return true;
816}
817
818static void reset_page(struct page *page)
819{
820 __ClearPageMovable(page);
821 ClearPagePrivate(page);
822 set_page_private(page, 0);
823 page_mapcount_reset(page);
824 page->index = 0;
825}
826
827static int trylock_zspage(struct zspage *zspage)
828{
829 struct page *cursor, *fail;
830
831 for (cursor = get_first_page(zspage); cursor != NULL; cursor =
832 get_next_page(cursor)) {
833 if (!trylock_page(cursor)) {
834 fail = cursor;
835 goto unlock;
836 }
837 }
838
839 return 1;
840unlock:
841 for (cursor = get_first_page(zspage); cursor != fail; cursor =
842 get_next_page(cursor))
843 unlock_page(cursor);
844
845 return 0;
846}
847
848static void __free_zspage(struct zs_pool *pool, struct size_class *class,
849 struct zspage *zspage)
850{
851 struct page *page, *next;
852 int fg;
853 unsigned int class_idx;
854
855 get_zspage_mapping(zspage, &class_idx, &fg);
856
857 assert_spin_locked(&pool->lock);
858
859 VM_BUG_ON(get_zspage_inuse(zspage));
860 VM_BUG_ON(fg != ZS_INUSE_RATIO_0);
861
862 next = page = get_first_page(zspage);
863 do {
864 VM_BUG_ON_PAGE(!PageLocked(page), page);
865 next = get_next_page(page);
866 reset_page(page);
867 unlock_page(page);
868 dec_zone_page_state(page, NR_ZSPAGES);
869 put_page(page);
870 page = next;
871 } while (page != NULL);
872
873 cache_free_zspage(pool, zspage);
874
875 class_stat_dec(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
876 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
877}
878
879static void free_zspage(struct zs_pool *pool, struct size_class *class,
880 struct zspage *zspage)
881{
882 VM_BUG_ON(get_zspage_inuse(zspage));
883 VM_BUG_ON(list_empty(&zspage->list));
884
885 /*
	 * Since zs_free cannot sleep, this function cannot call
	 * lock_page. The page locks that trylock_zspage took will be
	 * released by __free_zspage.
889 */
890 if (!trylock_zspage(zspage)) {
891 kick_deferred_free(pool);
892 return;
893 }
894
895 remove_zspage(class, zspage, ZS_INUSE_RATIO_0);
896 __free_zspage(pool, class, zspage);
897}
898
899/* Initialize a newly allocated zspage */
900static void init_zspage(struct size_class *class, struct zspage *zspage)
901{
902 unsigned int freeobj = 1;
903 unsigned long off = 0;
904 struct page *page = get_first_page(zspage);
905
906 while (page) {
907 struct page *next_page;
908 struct link_free *link;
909 void *vaddr;
910
911 set_first_obj_offset(page, off);
912
913 vaddr = kmap_atomic(page);
914 link = (struct link_free *)vaddr + off / sizeof(*link);
915
916 while ((off += class->size) < PAGE_SIZE) {
917 link->next = freeobj++ << OBJ_TAG_BITS;
918 link += class->size / sizeof(*link);
919 }
920
921 /*
922 * We now come to the last (full or partial) object on this
923 * page, which must point to the first object on the next
924 * page (if present)
925 */
926 next_page = get_next_page(page);
927 if (next_page) {
928 link->next = freeobj++ << OBJ_TAG_BITS;
929 } else {
930 /*
				 * Reset the OBJ_ALLOCATED_TAG bit in the last
				 * link to tell whether it's an allocated
				 * object or not.
933 */
934 link->next = -1UL << OBJ_TAG_BITS;
935 }
936 kunmap_atomic(vaddr);
937 page = next_page;
938 off %= PAGE_SIZE;
939 }
940
941 set_freeobj(zspage, 0);
942}
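
/*
 * Worked example (illustrative): for class->size == 1024 and 4K pages,
 * each page holds four objects. The link in object 0 stores freeobj 1,
 * object 1 stores 2, and so on; object 3, the last one on the first
 * page, stores the index of object 4, which starts on the next page.
 * The very last object of the zspage stores -1UL << OBJ_TAG_BITS as an
 * end-of-list marker, and set_freeobj(zspage, 0) makes object 0 the head.
 */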
943
944static void create_page_chain(struct size_class *class, struct zspage *zspage,
945 struct page *pages[])
946{
947 int i;
948 struct page *page;
949 struct page *prev_page = NULL;
950 int nr_pages = class->pages_per_zspage;
951
952 /*
953 * Allocate individual pages and link them together as:
954 * 1. all pages are linked together using page->index
	 * 2. each sub-page points to the zspage using page->private
	 *
	 * We set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
959 */
960 for (i = 0; i < nr_pages; i++) {
961 page = pages[i];
962 set_page_private(page, (unsigned long)zspage);
963 page->index = 0;
964 if (i == 0) {
965 zspage->first_page = page;
966 SetPagePrivate(page);
967 if (unlikely(class->objs_per_zspage == 1 &&
968 class->pages_per_zspage == 1))
969 SetZsHugePage(zspage);
970 } else {
971 prev_page->index = (unsigned long)page;
972 }
973 prev_page = page;
974 }
975}
976
977/*
978 * Allocate a zspage for the given size class
979 */
980static struct zspage *alloc_zspage(struct zs_pool *pool,
981 struct size_class *class,
982 gfp_t gfp)
983{
984 int i;
985 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
986 struct zspage *zspage = cache_alloc_zspage(pool, gfp);
987
988 if (!zspage)
989 return NULL;
990
991 zspage->magic = ZSPAGE_MAGIC;
992 migrate_lock_init(zspage);
993
994 for (i = 0; i < class->pages_per_zspage; i++) {
995 struct page *page;
996
997 page = alloc_page(gfp);
998 if (!page) {
999 while (--i >= 0) {
1000 dec_zone_page_state(pages[i], NR_ZSPAGES);
1001 __free_page(pages[i]);
1002 }
1003 cache_free_zspage(pool, zspage);
1004 return NULL;
1005 }
1006
1007 inc_zone_page_state(page, NR_ZSPAGES);
1008 pages[i] = page;
1009 }
1010
1011 create_page_chain(class, zspage, pages);
1012 init_zspage(class, zspage);
1013 zspage->pool = pool;
1014
1015 return zspage;
1016}
1017
1018static struct zspage *find_get_zspage(struct size_class *class)
1019{
1020 int i;
1021 struct zspage *zspage;
1022
1023 for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) {
1024 zspage = list_first_entry_or_null(&class->fullness_list[i],
1025 struct zspage, list);
1026 if (zspage)
1027 break;
1028 }
1029
1030 return zspage;
1031}
1032
1033static inline int __zs_cpu_up(struct mapping_area *area)
1034{
1035 /*
1036 * Make sure we don't leak memory if a cpu UP notification
1037 * and zs_init() race and both call zs_cpu_up() on the same cpu
1038 */
1039 if (area->vm_buf)
1040 return 0;
1041 area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
1042 if (!area->vm_buf)
1043 return -ENOMEM;
1044 return 0;
1045}
1046
1047static inline void __zs_cpu_down(struct mapping_area *area)
1048{
1049 kfree(area->vm_buf);
1050 area->vm_buf = NULL;
1051}
1052
1053static void *__zs_map_object(struct mapping_area *area,
1054 struct page *pages[2], int off, int size)
1055{
1056 int sizes[2];
1057 void *addr;
1058 char *buf = area->vm_buf;
1059
1060 /* disable page faults to match kmap_atomic() return conditions */
1061 pagefault_disable();
1062
1063 /* no read fastpath */
1064 if (area->vm_mm == ZS_MM_WO)
1065 goto out;
1066
1067 sizes[0] = PAGE_SIZE - off;
1068 sizes[1] = size - sizes[0];
1069
1070 /* copy object to per-cpu buffer */
1071 addr = kmap_atomic(pages[0]);
1072 memcpy(buf, addr + off, sizes[0]);
1073 kunmap_atomic(addr);
1074 addr = kmap_atomic(pages[1]);
1075 memcpy(buf + sizes[0], addr, sizes[1]);
1076 kunmap_atomic(addr);
1077out:
1078 return area->vm_buf;
1079}
1080
1081static void __zs_unmap_object(struct mapping_area *area,
1082 struct page *pages[2], int off, int size)
1083{
1084 int sizes[2];
1085 void *addr;
1086 char *buf;
1087
1088 /* no write fastpath */
1089 if (area->vm_mm == ZS_MM_RO)
1090 goto out;
1091
1092 buf = area->vm_buf;
1093 buf = buf + ZS_HANDLE_SIZE;
1094 size -= ZS_HANDLE_SIZE;
1095 off += ZS_HANDLE_SIZE;
1096
1097 sizes[0] = PAGE_SIZE - off;
1098 sizes[1] = size - sizes[0];
1099
1100 /* copy per-cpu buffer to object */
1101 addr = kmap_atomic(pages[0]);
1102 memcpy(addr + off, buf, sizes[0]);
1103 kunmap_atomic(addr);
1104 addr = kmap_atomic(pages[1]);
1105 memcpy(addr, buf + sizes[0], sizes[1]);
1106 kunmap_atomic(addr);
1107
1108out:
1109 /* enable page faults to match kunmap_atomic() return conditions */
1110 pagefault_enable();
1111}
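
/*
 * Worked example (illustrative): with 4K pages, an object of
 * class->size == 3264 at offset 3264 (object index 1) spans two pages,
 * so __zs_map_object() copies sizes[0] == 4096 - 3264 == 832 bytes from
 * the first page and sizes[1] == 3264 - 832 == 2432 bytes from the
 * second into the per-cpu vm_buf; __zs_unmap_object() writes the buffer
 * back the same way, skipping the ZS_HANDLE_SIZE header.
 */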
1112
1113static int zs_cpu_prepare(unsigned int cpu)
1114{
1115 struct mapping_area *area;
1116
1117 area = &per_cpu(zs_map_area, cpu);
1118 return __zs_cpu_up(area);
1119}
1120
1121static int zs_cpu_dead(unsigned int cpu)
1122{
1123 struct mapping_area *area;
1124
1125 area = &per_cpu(zs_map_area, cpu);
1126 __zs_cpu_down(area);
1127 return 0;
1128}
1129
1130static bool can_merge(struct size_class *prev, int pages_per_zspage,
1131 int objs_per_zspage)
1132{
1133 if (prev->pages_per_zspage == pages_per_zspage &&
1134 prev->objs_per_zspage == objs_per_zspage)
1135 return true;
1136
1137 return false;
1138}
1139
1140static bool zspage_full(struct size_class *class, struct zspage *zspage)
1141{
1142 return get_zspage_inuse(zspage) == class->objs_per_zspage;
1143}
1144
1145static bool zspage_empty(struct zspage *zspage)
1146{
1147 return get_zspage_inuse(zspage) == 0;
1148}
1149
1150/**
1151 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
 * that holds objects of the provided size.
1153 * @pool: zsmalloc pool to use
1154 * @size: object size
1155 *
1156 * Context: Any context.
1157 *
 * Return: the index of the zsmalloc &size_class that holds objects of the
1159 * provided size.
1160 */
1161unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
1162{
1163 struct size_class *class;
1164
1165 class = pool->size_class[get_size_class_index(size)];
1166
1167 return class->index;
1168}
1169EXPORT_SYMBOL_GPL(zs_lookup_class_index);
1170
1171unsigned long zs_get_total_pages(struct zs_pool *pool)
1172{
1173 return atomic_long_read(&pool->pages_allocated);
1174}
1175EXPORT_SYMBOL_GPL(zs_get_total_pages);
1176
1177/**
1178 * zs_map_object - get address of allocated object from handle.
1179 * @pool: pool from which the object was allocated
1180 * @handle: handle returned from zs_malloc
1181 * @mm: mapping mode to use
1182 *
1183 * Before using an object allocated from zs_malloc, it must be mapped using
1184 * this function. When done with the object, it must be unmapped using
1185 * zs_unmap_object.
1186 *
1187 * Only one object can be mapped per cpu at a time. There is no protection
1188 * against nested mappings.
1189 *
1190 * This function returns with preemption and page faults disabled.
1191 */
1192void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1193 enum zs_mapmode mm)
1194{
1195 struct zspage *zspage;
1196 struct page *page;
1197 unsigned long obj, off;
1198 unsigned int obj_idx;
1199
1200 struct size_class *class;
1201 struct mapping_area *area;
1202 struct page *pages[2];
1203 void *ret;
1204
1205 /*
1206 * Because we use per-cpu mapping areas shared among the
1207 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
1209 */
1210 BUG_ON(in_interrupt());
1211
	/* The pool->lock guarantees we can safely get the zspage from the handle */
1213 spin_lock(&pool->lock);
1214 obj = handle_to_obj(handle);
1215 obj_to_location(obj, &page, &obj_idx);
1216 zspage = get_zspage(page);
1217
1218 /*
	 * Migration cannot move any zpages in this zspage. Here, pool->lock
	 * is too heavy, since callers may hold the mapping for some time
	 * before calling zs_unmap_object(), so delegate the locking from the
	 * pool to the zspage, which is a smaller granularity.
1223 */
1224 migrate_read_lock(zspage);
1225 spin_unlock(&pool->lock);
1226
1227 class = zspage_class(pool, zspage);
1228 off = offset_in_page(class->size * obj_idx);
1229
1230 local_lock(&zs_map_area.lock);
1231 area = this_cpu_ptr(&zs_map_area);
1232 area->vm_mm = mm;
1233 if (off + class->size <= PAGE_SIZE) {
1234 /* this object is contained entirely within a page */
1235 area->vm_addr = kmap_atomic(page);
1236 ret = area->vm_addr + off;
1237 goto out;
1238 }
1239
1240 /* this object spans two pages */
1241 pages[0] = page;
1242 pages[1] = get_next_page(page);
1243 BUG_ON(!pages[1]);
1244
1245 ret = __zs_map_object(area, pages, off, class->size);
1246out:
1247 if (likely(!ZsHugePage(zspage)))
1248 ret += ZS_HANDLE_SIZE;
1249
1250 return ret;
1251}
1252EXPORT_SYMBOL_GPL(zs_map_object);
1253
1254void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1255{
1256 struct zspage *zspage;
1257 struct page *page;
1258 unsigned long obj, off;
1259 unsigned int obj_idx;
1260
1261 struct size_class *class;
1262 struct mapping_area *area;
1263
1264 obj = handle_to_obj(handle);
1265 obj_to_location(obj, &page, &obj_idx);
1266 zspage = get_zspage(page);
1267 class = zspage_class(pool, zspage);
1268 off = offset_in_page(class->size * obj_idx);
1269
1270 area = this_cpu_ptr(&zs_map_area);
1271 if (off + class->size <= PAGE_SIZE)
1272 kunmap_atomic(area->vm_addr);
1273 else {
1274 struct page *pages[2];
1275
1276 pages[0] = page;
1277 pages[1] = get_next_page(page);
1278 BUG_ON(!pages[1]);
1279
1280 __zs_unmap_object(area, pages, off, class->size);
1281 }
1282 local_unlock(&zs_map_area.lock);
1283
1284 migrate_read_unlock(zspage);
1285}
1286EXPORT_SYMBOL_GPL(zs_unmap_object);
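
/*
 * Illustrative sketch (not part of the allocator): a typical read access
 * under the map/unmap API above. The destination buffer and the 64-byte
 * copy length are hypothetical example values; zs_map_object() returns
 * with preemption and page faults disabled, so the critical section must
 * stay short and atomic.
 */
static void __maybe_unused zs_read_example(struct zs_pool *pool,
					   unsigned long handle, void *dst)
{
	void *src;

	src = zs_map_object(pool, handle, ZS_MM_RO);
	memcpy(dst, src, 64);
	zs_unmap_object(pool, handle);
}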
1287
1288/**
1289 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1290 * zsmalloc &size_class.
1291 * @pool: zsmalloc pool to use
1292 *
1293 * The function returns the size of the first huge class - any object of equal
 * or bigger size will be stored in a zspage consisting of a single physical
1295 * page.
1296 *
1297 * Context: Any context.
1298 *
1299 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
1300 */
1301size_t zs_huge_class_size(struct zs_pool *pool)
1302{
1303 return huge_class_size;
1304}
1305EXPORT_SYMBOL_GPL(zs_huge_class_size);
1306
1307static unsigned long obj_malloc(struct zs_pool *pool,
1308 struct zspage *zspage, unsigned long handle)
1309{
1310 int i, nr_page, offset;
1311 unsigned long obj;
1312 struct link_free *link;
1313 struct size_class *class;
1314
1315 struct page *m_page;
1316 unsigned long m_offset;
1317 void *vaddr;
1318
1319 class = pool->size_class[zspage->class];
1320 handle |= OBJ_ALLOCATED_TAG;
1321 obj = get_freeobj(zspage);
1322
1323 offset = obj * class->size;
1324 nr_page = offset >> PAGE_SHIFT;
1325 m_offset = offset_in_page(offset);
1326 m_page = get_first_page(zspage);
1327
1328 for (i = 0; i < nr_page; i++)
1329 m_page = get_next_page(m_page);
1330
1331 vaddr = kmap_atomic(m_page);
1332 link = (struct link_free *)vaddr + m_offset / sizeof(*link);
1333 set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1334 if (likely(!ZsHugePage(zspage)))
1335 /* record handle in the header of allocated chunk */
1336 link->handle = handle;
1337 else
1338 /* record handle to page->index */
1339 zspage->first_page->index = handle;
1340
1341 kunmap_atomic(vaddr);
1342 mod_zspage_inuse(zspage, 1);
1343
1344 obj = location_to_obj(m_page, obj);
1345
1346 return obj;
1347}
1348
1349
1350/**
1351 * zs_malloc - Allocate block of given size from pool.
1352 * @pool: pool to allocate from
1353 * @size: size of block to allocate
1354 * @gfp: gfp flags when allocating object
1355 *
 * On success, a handle to the allocated object is returned,
 * otherwise an ERR_PTR-encoded error value.
1358 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1359 */
1360unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1361{
1362 unsigned long handle, obj;
1363 struct size_class *class;
1364 int newfg;
1365 struct zspage *zspage;
1366
1367 if (unlikely(!size))
1368 return (unsigned long)ERR_PTR(-EINVAL);
1369
1370 if (unlikely(size > ZS_MAX_ALLOC_SIZE))
1371 return (unsigned long)ERR_PTR(-ENOSPC);
1372
1373 handle = cache_alloc_handle(pool, gfp);
1374 if (!handle)
1375 return (unsigned long)ERR_PTR(-ENOMEM);
1376
1377 /* extra space in chunk to keep the handle */
1378 size += ZS_HANDLE_SIZE;
1379 class = pool->size_class[get_size_class_index(size)];
1380
1381 /* pool->lock effectively protects the zpage migration */
1382 spin_lock(&pool->lock);
1383 zspage = find_get_zspage(class);
1384 if (likely(zspage)) {
1385 obj = obj_malloc(pool, zspage, handle);
1386 /* Now move the zspage to another fullness group, if required */
1387 fix_fullness_group(class, zspage);
1388 record_obj(handle, obj);
1389 class_stat_inc(class, ZS_OBJS_INUSE, 1);
1390
1391 goto out;
1392 }
1393
1394 spin_unlock(&pool->lock);
1395
1396 zspage = alloc_zspage(pool, class, gfp);
1397 if (!zspage) {
1398 cache_free_handle(pool, handle);
1399 return (unsigned long)ERR_PTR(-ENOMEM);
1400 }
1401
1402 spin_lock(&pool->lock);
1403 obj = obj_malloc(pool, zspage, handle);
1404 newfg = get_fullness_group(class, zspage);
1405 insert_zspage(class, zspage, newfg);
1406 set_zspage_mapping(zspage, class->index, newfg);
1407 record_obj(handle, obj);
1408 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
1409 class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
1410 class_stat_inc(class, ZS_OBJS_INUSE, 1);
1411
	/* We have completely set up the zspage, so mark it as movable */
1413 SetZsPageMovable(pool, zspage);
1414out:
1415 spin_unlock(&pool->lock);
1416
1417 return handle;
1418}
1419EXPORT_SYMBOL_GPL(zs_malloc);
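
/*
 * Illustrative sketch (not part of the allocator): in this version
 * zs_malloc() returns an ERR_PTR-encoded value on failure, so callers
 * are expected to test the handle with IS_ERR_VALUE() rather than
 * comparing it against 0. "len" is a hypothetical request size.
 */
static unsigned long __maybe_unused zs_malloc_checked(struct zs_pool *pool,
						      size_t len)
{
	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);

	if (IS_ERR_VALUE(handle))
		return 0;	/* treat 0 as "no allocation" here */

	return handle;
}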
1420
1421static void obj_free(int class_size, unsigned long obj)
1422{
1423 struct link_free *link;
1424 struct zspage *zspage;
1425 struct page *f_page;
1426 unsigned long f_offset;
1427 unsigned int f_objidx;
1428 void *vaddr;
1429
1430 obj_to_location(obj, &f_page, &f_objidx);
1431 f_offset = offset_in_page(class_size * f_objidx);
1432 zspage = get_zspage(f_page);
1433
1434 vaddr = kmap_atomic(f_page);
1435 link = (struct link_free *)(vaddr + f_offset);
1436
1437 /* Insert this object in containing zspage's freelist */
1438 if (likely(!ZsHugePage(zspage)))
1439 link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1440 else
1441 f_page->index = 0;
1442 set_freeobj(zspage, f_objidx);
1443
1444 kunmap_atomic(vaddr);
1445 mod_zspage_inuse(zspage, -1);
1446}
1447
1448void zs_free(struct zs_pool *pool, unsigned long handle)
1449{
1450 struct zspage *zspage;
1451 struct page *f_page;
1452 unsigned long obj;
1453 struct size_class *class;
1454 int fullness;
1455
1456 if (IS_ERR_OR_NULL((void *)handle))
1457 return;
1458
1459 /*
	 * The pool->lock protects against races with zspage migration,
	 * so it's safe to get the page from the handle.
1462 */
1463 spin_lock(&pool->lock);
1464 obj = handle_to_obj(handle);
1465 obj_to_page(obj, &f_page);
1466 zspage = get_zspage(f_page);
1467 class = zspage_class(pool, zspage);
1468
1469 class_stat_dec(class, ZS_OBJS_INUSE, 1);
1470 obj_free(class->size, obj);
1471
1472 fullness = fix_fullness_group(class, zspage);
1473 if (fullness == ZS_INUSE_RATIO_0)
1474 free_zspage(pool, class, zspage);
1475
1476 spin_unlock(&pool->lock);
1477 cache_free_handle(pool, handle);
1478}
1479EXPORT_SYMBOL_GPL(zs_free);
1480
1481static void zs_object_copy(struct size_class *class, unsigned long dst,
1482 unsigned long src)
1483{
1484 struct page *s_page, *d_page;
1485 unsigned int s_objidx, d_objidx;
1486 unsigned long s_off, d_off;
1487 void *s_addr, *d_addr;
1488 int s_size, d_size, size;
1489 int written = 0;
1490
1491 s_size = d_size = class->size;
1492
1493 obj_to_location(src, &s_page, &s_objidx);
1494 obj_to_location(dst, &d_page, &d_objidx);
1495
1496 s_off = offset_in_page(class->size * s_objidx);
1497 d_off = offset_in_page(class->size * d_objidx);
1498
1499 if (s_off + class->size > PAGE_SIZE)
1500 s_size = PAGE_SIZE - s_off;
1501
1502 if (d_off + class->size > PAGE_SIZE)
1503 d_size = PAGE_SIZE - d_off;
1504
1505 s_addr = kmap_atomic(s_page);
1506 d_addr = kmap_atomic(d_page);
1507
1508 while (1) {
1509 size = min(s_size, d_size);
1510 memcpy(d_addr + d_off, s_addr + s_off, size);
1511 written += size;
1512
1513 if (written == class->size)
1514 break;
1515
1516 s_off += size;
1517 s_size -= size;
1518 d_off += size;
1519 d_size -= size;
1520
1521 /*
1522 * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
		 * calls must occur in the reverse order of the calls to kmap_atomic().
1524 * So, to call kunmap_atomic(s_addr) we should first call
1525 * kunmap_atomic(d_addr). For more details see
1526 * Documentation/mm/highmem.rst.
1527 */
1528 if (s_off >= PAGE_SIZE) {
1529 kunmap_atomic(d_addr);
1530 kunmap_atomic(s_addr);
1531 s_page = get_next_page(s_page);
1532 s_addr = kmap_atomic(s_page);
1533 d_addr = kmap_atomic(d_page);
1534 s_size = class->size - written;
1535 s_off = 0;
1536 }
1537
1538 if (d_off >= PAGE_SIZE) {
1539 kunmap_atomic(d_addr);
1540 d_page = get_next_page(d_page);
1541 d_addr = kmap_atomic(d_page);
1542 d_size = class->size - written;
1543 d_off = 0;
1544 }
1545 }
1546
1547 kunmap_atomic(d_addr);
1548 kunmap_atomic(s_addr);
1549}
1550
1551/*
 * Find an allocated object in the zspage, starting the search from
 * the given index, and return its handle.
1554 */
1555static unsigned long find_alloced_obj(struct size_class *class,
1556 struct page *page, int *obj_idx)
1557{
1558 unsigned int offset;
1559 int index = *obj_idx;
1560 unsigned long handle = 0;
1561 void *addr = kmap_atomic(page);
1562
1563 offset = get_first_obj_offset(page);
1564 offset += class->size * index;
1565
1566 while (offset < PAGE_SIZE) {
1567 if (obj_allocated(page, addr + offset, &handle))
1568 break;
1569
1570 offset += class->size;
1571 index++;
1572 }
1573
1574 kunmap_atomic(addr);
1575
1576 *obj_idx = index;
1577
1578 return handle;
1579}
1580
static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
			   struct zspage *dst_zspage)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	int obj_idx = 0;
	struct page *s_page = get_first_page(src_zspage);
	struct size_class *class = pool->size_class[src_zspage->class];

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(pool, dst_zspage, handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		record_obj(handle, free_obj);
		obj_free(class->size, used_obj);

		/* Stop if there is no more space */
		if (zspage_full(class, dst_zspage))
			break;

		/* Stop if there are no more objects to migrate */
		if (zspage_empty(src_zspage))
			break;
	}
}

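/*
 * Pick a source zspage for compaction: walk the fullness groups from
 * least used (ZS_INUSE_RATIO_10) up to most used (ZS_INUSE_RATIO_99), so
 * the emptiest zspages are drained first, and detach the first one found.
 */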
static struct zspage *isolate_src_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage, fg);
			return zspage;
		}
	}

	return zspage;
}

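/*
 * Pick a destination zspage: walk the fullness groups in the opposite
 * direction, from most used (ZS_INUSE_RATIO_99) down to least used
 * (ZS_INUSE_RATIO_10), so objects are packed into the fullest candidate
 * first.
 */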
static struct zspage *isolate_dst_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage, fg);
			return zspage;
		}
	}

	return zspage;
}

/*
 * putback_zspage - add @zspage into the right fullness list of @class
 * @class: destination class
 * @zspage: target page
 *
 * Return: @zspage's fullness status
 */
static int putback_zspage(struct size_class *class, struct zspage *zspage)
{
	int fullness;

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);

	return fullness;
}

#ifdef CONFIG_COMPACTION
/*
 * To prevent a zspage from being destroyed during migration, zspage
 * freeing must hold the locks of all pages in the zspage.
 */
static void lock_zspage(struct zspage *zspage)
{
	struct page *curr_page, *page;

	/*
	 * Pages we haven't locked yet can be migrated off the list while we're
	 * trying to lock them, so we need to be careful and only attempt to
	 * lock each page under migrate_read_lock(). Otherwise, the page we lock
	 * may no longer belong to the zspage. This means that we may wait for
	 * the wrong page to unlock, so we must take a reference to the page
	 * prior to waiting for it to unlock outside migrate_read_lock().
	 */
	while (1) {
		migrate_read_lock(zspage);
		page = get_first_page(zspage);
		if (trylock_page(page))
			break;
		get_page(page);
		migrate_read_unlock(zspage);
		wait_on_page_locked(page);
		put_page(page);
	}

	curr_page = page;
	while ((page = get_next_page(curr_page))) {
		if (trylock_page(page)) {
			curr_page = page;
		} else {
			get_page(page);
			migrate_read_unlock(zspage);
			wait_on_page_locked(page);
			put_page(page);
			migrate_read_lock(zspage);
		}
	}
	migrate_read_unlock(zspage);
}
#endif /* CONFIG_COMPACTION */

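/*
 * zspage->lock serializes object access against zspage migration: readers
 * take migrate_read_lock() around object use, while the migration and
 * compaction paths take migrate_write_lock() before moving objects.
 */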
static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
{
	read_unlock(&zspage->lock);
}

#ifdef CONFIG_COMPACTION
static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_lock_nested(struct zspage *zspage)
{
	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}

/* Number of isolated subpages for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated++;
}

static void dec_zspage_isolation(struct zspage *zspage)
{
	VM_BUG_ON(zspage->isolated == 0);
	zspage->isolated--;
}

static const struct movable_operations zsmalloc_mops;

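/*
 * Rebuild the zspage's page chain with @newpage substituted for @oldpage:
 * collect the component pages in order, swap in @newpage, re-create the
 * chain, and carry over the first-object offset (and, for a huge zspage,
 * the handle stored in page->index) so the zspage metadata stays intact.
 */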
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
			     struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(ZsHugePage(zspage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, &zsmalloc_mops);
}

static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zs_pool *pool;
	struct zspage *zspage;

	/*
	 * The page is locked, so the zspage can't be destroyed. For details,
	 * see lock_zspage() in free_zspage().
	 */
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);
	pool = zspage->pool;
	spin_lock(&pool->lock);
	inc_zspage_isolation(zspage);
	spin_unlock(&pool->lock);

	return true;
}

static int zs_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	unsigned int offset;
	unsigned long handle;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;

	/*
	 * We cannot support the _NO_COPY case here, because the copy needs
	 * to happen under the zs lock, which does not work with the
	 * MIGRATE_SYNC_NO_COPY workflow.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;


	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	/* The page is locked, so this pointer must remain valid */
	zspage = get_zspage(page);
	pool = zspage->pool;

	/*
	 * The pool's lock protects against races between zspage migration
	 * and zs_free.
	 */
	spin_lock(&pool->lock);
	class = zspage_class(pool, zspage);

	/* the migrate_write_lock protects zpage access via zs_map_object */
	migrate_write_lock(zspage);

	offset = get_first_obj_offset(page);
	s_addr = kmap_atomic(page);

	/*
	 * No user can access any object in this zspage while we hold the
	 * locks, so it is safe to copy the data over.
	 */
	d_addr = kmap_atomic(newpage);
	copy_page(d_addr, s_addr);
	kunmap_atomic(d_addr);

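	/*
	 * Walk the objects in the old page and re-point each allocated
	 * object's handle at the same object index in @newpage.
	 */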
	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
					addr += class->size) {
		if (obj_allocated(page, addr, &handle)) {
			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								 obj_idx);
			record_obj(handle, new_obj);
		}
	}
	kunmap_atomic(s_addr);

	replace_sub_page(class, zspage, newpage, page);
	dec_zspage_isolation(zspage);
	/*
	 * Since the data copy is done and the new zspage structure is set
	 * up, it's okay to release the pool's lock.
	 */
	spin_unlock(&pool->lock);
	migrate_write_unlock(zspage);

	get_page(newpage);
	if (page_zone(newpage) != page_zone(page)) {
		dec_zone_page_state(page, NR_ZSPAGES);
		inc_zone_page_state(newpage, NR_ZSPAGES);
	}

	reset_page(page);
	put_page(page);

	return MIGRATEPAGE_SUCCESS;
}

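/*
 * Called by the migration core when an isolated page is released without
 * having been migrated; simply drop the isolation count again.
 */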
static void zs_page_putback(struct page *page)
{
	struct zs_pool *pool;
	struct zspage *zspage;

	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);
	pool = zspage->pool;
	spin_lock(&pool->lock);
	dec_zspage_isolation(zspage);
	spin_unlock(&pool->lock);
}

static const struct movable_operations zsmalloc_mops = {
	.isolate_page = zs_page_isolate,
	.migrate_page = zs_page_migrate,
	.putback_page = zs_page_putback,
};

/*
 * The caller should hold the page lock on all pages in the zspage.
 * Here, we cannot use the zspage metadata.
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	unsigned int class_idx;
	int fullness;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		spin_lock(&pool->lock);
		list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
				 &free_pages);
		spin_unlock(&pool->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		get_zspage_mapping(zspage, &class_idx, &fullness);
		VM_BUG_ON(fullness != ZS_INUSE_RATIO_0);
		class = pool->size_class[class_idx];
		spin_lock(&pool->lock);
		__free_zspage(pool, class, zspage);
		spin_unlock(&pool->lock);
	}
}

static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void zs_flush_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

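/*
 * Mark every component page of a freshly created zspage as movable so the
 * page migration core can isolate and migrate it via zsmalloc_mops.
 */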
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &zsmalloc_mops);
		unlock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}
#else
static inline void zs_flush_migration(struct zs_pool *pool) { }
#endif

/*
 * Based on the number of unused allocated objects, calculate and return
 * the number of pages that can be freed by compacting this class.
 */
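/*
 * Worked example (hypothetical numbers): with 1000 objects allocated but
 * only 400 in use, 600 object slots are wasted. If the class packs 100
 * objects into each 4-page zspage, that is 6 reclaimable zspages, i.e.
 * 24 pages that compaction could free.
 */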
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, ZS_OBJS_INUSE);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}

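/*
 * Compact one size class: repeatedly pull the emptiest source zspage and
 * the fullest destination zspage off the fullness lists, migrate objects
 * from source to destination, and free any source that ends up empty.
 * pool->lock is dropped periodically (when a destination fills up or the
 * lock is contended) to bound lock hold times.
 */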
static unsigned long __zs_compact(struct zs_pool *pool,
				  struct size_class *class)
{
	struct zspage *src_zspage = NULL;
	struct zspage *dst_zspage = NULL;
	unsigned long pages_freed = 0;

	/*
	 * Protect against races between zspage migration and zs_free, as
	 * well as zspage allocation/free.
	 */
	spin_lock(&pool->lock);
	while (zs_can_compact(class)) {
		int fg;

		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;
			migrate_write_lock(dst_zspage);
		}

		src_zspage = isolate_src_zspage(class);
		if (!src_zspage)
			break;

		migrate_write_lock_nested(src_zspage);

		migrate_zspage(pool, src_zspage, dst_zspage);
		fg = putback_zspage(class, src_zspage);
		migrate_write_unlock(src_zspage);

		if (fg == ZS_INUSE_RATIO_0) {
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
		}
		src_zspage = NULL;

		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
		    || spin_is_contended(&pool->lock)) {
			putback_zspage(class, dst_zspage);
			migrate_write_unlock(dst_zspage);
			dst_zspage = NULL;

			spin_unlock(&pool->lock);
			cond_resched();
			spin_lock(&pool->lock);
		}
	}

	if (src_zspage) {
		putback_zspage(class, src_zspage);
		migrate_write_unlock(src_zspage);
	}

	if (dst_zspage) {
		putback_zspage(class, dst_zspage);
		migrate_write_unlock(dst_zspage);
	}
	spin_unlock(&pool->lock);

	return pages_freed;
}

unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;
	unsigned long pages_freed = 0;

	/*
	 * Pool compaction is performed under pool->lock so it is basically
	 * single-threaded. Having more than one thread in __zs_compact()
	 * will increase pool->lock contention, which will impact other
	 * zsmalloc operations that need pool->lock.
	 */
	if (atomic_xchg(&pool->compaction_in_progress, 1))
		return 0;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;
		pages_freed += __zs_compact(pool, class);
	}
	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
	atomic_set(&pool->compaction_in_progress, 0);

	return pages_freed;
}
EXPORT_SYMBOL_GPL(zs_compact);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);

static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = shrinker->private_data;

	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool);

	return pages_freed ? pages_freed : SHRINK_STOP;
}

static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = shrinker->private_data;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	shrinker_free(pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
	if (!pool->shrinker)
		return -ENOMEM;

	pool->shrinker->scan_objects = zs_shrinker_scan;
	pool->shrinker->count_objects = zs_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->private_data = pool;

	shrinker_register(pool->shrinker);

	return 0;
}

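/*
 * Choose how many 0-order pages to chain into a zspage for a given class
 * size: try every chain length up to ZS_MAX_PAGES_PER_ZSPAGE and keep the
 * one whose total size leaves the least trailing waste. Power-of-2 sizes
 * tile a single page exactly, so they short-circuit to a chain of one.
 *
 * Worked example (assuming 4 KiB pages and a hypothetical 3264-byte
 * class): waste = (i * PAGE_SIZE) % 3264 yields 832, 1664, 2496 and 64
 * bytes for i = 1..4, so a chain of 4 pages is chosen.
 */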
static int calculate_zspage_chain_size(int class_size)
{
	int i, min_waste = INT_MAX;
	int chain_size = 1;

	if (is_power_of_2(class_size))
		return chain_size;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste;

		waste = (i * PAGE_SIZE) % class_size;
		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}

	return chain_size;
}

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything else when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);
	spin_lock_init(&pool->lock);
	atomic_set(&pool->compaction_in_progress, 0);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size of a size_class that we
	 * may want to merge into must be greater than or equal to the
	 * current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = calculate_zspage_chain_size(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * We iterate from the biggest down to the smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
				!huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds handle size before it performs
			 * size class search - so an object may be smaller than
			 * huge class size, yet it still can end up in the huge
			 * class because it grows by ZS_HANDLE_SIZE extra bytes
			 * right before class lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}

		/*
		 * size_class is used for normal zsmalloc operations such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_classes have the same
		 * characteristics. So, we make a size_class point to the
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		pool->size_class[i] = class;

		fullness = ZS_INUSE_RATIO_0;
		while (fullness < NR_FULLNESS_GROUPS) {
			INIT_LIST_HEAD(&class->fullness_list[fullness]);
			fullness++;
		}

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical, since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is optional. If registration
	 * fails we can still use the pool normally and the user can trigger
	 * compaction manually. Thus, ignore the return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

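/*
 * A minimal usage sketch for a zsmalloc client such as zram or zswap;
 * error handling is abbreviated and the object size is illustrative:
 *
 *	struct zs_pool *pool = zs_create_pool("example");
 *	unsigned long handle;
 *	void *obj;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	handle = zs_malloc(pool, 128, GFP_KERNEL);
 *	if (IS_ERR_VALUE(handle))
 *		return -ENOMEM;
 *	obj = zs_map_object(pool, handle, ZS_MM_WO);
 *	memset(obj, 0, 128);
 *	zs_unmap_object(pool, handle);
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */
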
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_flush_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
			if (list_empty(&class->fullness_list[fg]))
				continue;

			pr_err("Class-%d fullness group %d is not empty\n",
			       class->size, fg);
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
2321
2322static int __init zs_init(void)
2323{
2324 int ret;
2325
2326 ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
2327 zs_cpu_prepare, zs_cpu_dead);
2328 if (ret)
2329 goto out;
2330
2331#ifdef CONFIG_ZPOOL
2332 zpool_register_driver(&zs_zpool_driver);
2333#endif
2334
2335 zs_stat_init();
2336
2337 return 0;
2338
2339out:
2340 return ret;
2341}
2342
2343static void __exit zs_exit(void)
2344{
2345#ifdef CONFIG_ZPOOL
2346 zpool_unregister_driver(&zs_zpool_driver);
2347#endif
2348 cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
2349
2350 zs_stat_exit();
2351}
2352
2353module_init(zs_init);
2354module_exit(zs_exit);
2355
2356MODULE_LICENSE("Dual BSD/GPL");
2357MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");