/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>

#include "zsmalloc.h"
#include "zsmalloc_int.h"

/*
 * A zspage's class index and fullness group
 * are encoded in its (first) page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
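
/*
 * Illustrative example of the encoding above: the fullness group occupies
 * the low FULLNESS_BITS and the class index sits above it. For class_idx
 * = 5 and a fullness value of 2, the encoded word is (5 << 4) | 2 = 0x52;
 * decoding masks with FULLNESS_MASK (0xf) to recover the group and shifts
 * right by FULLNESS_BITS to recover the index.
 */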

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}

static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
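
/*
 * Worked example (illustrative, assuming fullness_threshold_frac is 4): a
 * zspage holding 8 objects with 2 in use is ZS_ALMOST_EMPTY (2 <= 8 / 4),
 * with 3 in use it is ZS_ALMOST_FULL, with 0 it is ZS_EMPTY, and with all
 * 8 in use it is ZS_FULL.
 */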

static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}

static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usable  = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage, since
 * then we can perfectly fit 8 such objects (a worked example
 * follows the function below).
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
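
/*
 * Worked example (illustrative, assuming PAGE_SIZE = 4096 and
 * ZS_MAX_PAGES_PER_ZSPAGE >= 3): for class_size = 1536 (3/8 of a page)
 * the loop above evaluates
 *	i = 1: waste = 4096 % 1536  = 1024, usedpc = 75
 *	i = 2: waste = 8192 % 1536  = 512,  usedpc = 93
 *	i = 3: waste = 12288 % 1536 = 0,    usedpc = 100
 * so a zspage for this class is built from 3 pages.
 */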

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page->private;
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= (obj_idx & OBJ_INDEX_MASK);

	return (void *)handle;
}

/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(void *handle, struct page **page,
				unsigned long *obj_idx)
{
	unsigned long hval = (unsigned long)handle;

	*page = pfn_to_page(hval >> OBJ_INDEX_BITS);
	*obj_idx = hval & OBJ_INDEX_MASK;
}
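
/*
 * Illustrative example: a handle is just the page frame number shifted
 * left by OBJ_INDEX_BITS with the object index packed into the low bits.
 * If OBJ_INDEX_BITS were 11, then pfn 0x1234 and obj_idx 3 would encode
 * to (0x1234 << 11) | 3 = 0x91a003, and obj_handle_to_location() recovers
 * both values by shifting and masking with OBJ_INDEX_MASK.
 */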

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
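
/*
 * Illustrative example: objects are laid out back to back across the
 * zspage, so a non-head page whose first object starts at page->index =
 * 512 places per-page object 2 of a 1536-byte class at 512 + 2 * 1536 =
 * 3584 within that page (the object may still spill into the next page).
 */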

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	reset_page_mapcount(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
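
/*
 * Illustrative sketch (continuing the 1536-byte, 4096-byte-page example):
 * a 3-page zspage holds 8 objects. Page 0 has objects 0 and 1 at offsets
 * 0 and 1536, with object 2 starting at 3072 and spilling into page 1;
 * page 1 (index = 512) holds objects 3 and 4, with object 5 spilling into
 * page 2; page 2 (index = 1024) holds objects 6 and 7, the last ending
 * exactly at PAGE_SIZE. Each free slot's link_free.next stores the handle
 * of the slot that follows, and the final slot's next is NULL.
 */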

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *prev_page = NULL;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			first_page->private = (unsigned long)page;
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		if (area->vm)
			break;
		area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
		if (!area->vm)
			return notifier_from_errno(-ENOMEM);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		if (area->vm)
			free_vm_area(area->vm);
		area->vm = NULL;
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			goto fail;
	}
	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}

struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	if (!name)
		return NULL;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;

		class = &pool->size_class[i];
		class->size = size;
		class->index = i;
		spin_lock_init(&class->lock);
		class->pages_per_zspage = get_pages_per_zspage(size);
	}

	pool->flags = flags;
	pool->name = name;

	return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = &pool->size_class[i];

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size "
					"%db, fullness group %d\n",
					class->size, fg);
			}
		}
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise NULL.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
void *zs_malloc(struct zs_pool *pool, size_t size)
{
	void *obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return NULL;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return NULL;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->pages_per_zspage;
	}

	obj = first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, void *obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->pages_per_zspage;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 */
void *zs_map_object(struct zs_pool *pool, void *handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
	} else {
		/* this object spans two pages */
		struct page *nextp;

		nextp = get_next_page(page);
		BUG_ON(!nextp);

		set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
		set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));

		/* We pre-allocated VM area so mapping can never fail */
		area->vm_addr = area->vm->addr;
	}

	return area->vm_addr + off;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, void *handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE) {
		kunmap_atomic(area->vm_addr);
	} else {
		set_pte(area->vm_ptes[0], __pte(0));
		set_pte(area->vm_ptes[1], __pte(0));
		__flush_tlb_one((unsigned long)area->vm_addr);
		__flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
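
/*
 * Example usage (illustrative only; error handling omitted, and the pool
 * name, flags and sizes are placeholders chosen by the caller):
 *
 *	struct zs_pool *pool = zs_create_pool("example", GFP_KERNEL);
 *	void *handle = zs_malloc(pool, len);
 *	void *dst = zs_map_object(pool, handle);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 *
 * A handle is an opaque value rather than a dereferenceable pointer, and
 * the per-cpu mapping taken by zs_map_object() is atomic, so the caller
 * must not sleep between map and unmap.
 */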

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	int i;
	u64 npages = 0;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");