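/*
 * Userspace stand-ins for the kernel's slab and mempool allocators, in the
 * style of the radix tree test suite's linux.c shim.  Two revisions follow:
 * an earlier one built around __GFP_NOWARN failure injection, and a later
 * one that gates allocation on __GFP_DIRECT_RECLAIM and adds alignment and
 * verbose-logging support.
 */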
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/mempool.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;

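/*
 * A slab cache is emulated with a mutex-protected LIFO free list: freed
 * objects are chained through their radix_tree_node private_data pointer
 * and handed back out before malloc() is called again.
 */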
struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

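/*
 * Minimal mempool shim: min_nr is ignored and nothing is preallocated;
 * alloc and free simply call through to the pool's callbacks.
 */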
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
	return pool->alloc(gfp_mask, pool->data);
}

void mempool_free(void *element, mempool_t *pool)
{
	pool->free(element, pool->data);
}

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data)
{
	mempool_t *ret = malloc(sizeof(*ret));

	ret->alloc = alloc_fn;
	ret->free = free_fn;
	ret->data = pool_data;
	return ret;
}

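/*
 * __GFP_NOWARN doubles as a failure-injection flag here, so tests can make
 * an allocation fail deterministically.  Otherwise pop a cached object off
 * the free list, or fall back to malloc() and run the constructor on the
 * fresh object.
 */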
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	return node;
}

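/*
 * Once more than ten objects are already cached, poison the memory with
 * POISON_FREE and return it to the system so use-after-free bugs surface
 * quickly; otherwise push the object onto the free list for reuse.
 */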
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

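/* kmalloc() forwards to malloc() and keeps the nr_allocated leak counter in sync. */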
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);
	uatomic_inc(&nr_allocated);
	return ret;
}

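/* As with the kernel's kfree(), a NULL pointer is silently ignored. */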
void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	free(p);
}

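/*
 * name, offset and flags are accepted for API compatibility but ignored;
 * only the object size and constructor are recorded.
 */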
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
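
/*
 * A later revision of the same shim.  Notable changes: allocations are
 * gated on __GFP_DIRECT_RECLAIM instead of failing on __GFP_NOWARN, caches
 * gain an alignment field backed by posix_memalign(), __GFP_ZERO is
 * honoured, the free list is chained through node->parent rather than
 * node->private_data, and kmalloc_verbose enables allocation tracing.
 */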
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>	/* needed for the printf() tracing below */
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;

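/*
 * Same free-list scheme as above, but the cache now records a requested
 * alignment; aligned objects come from posix_memalign() and are never
 * placed on the free list.
 */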
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

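/*
 * Without __GFP_DIRECT_RECLAIM the shim refuses to allocate, so tests can
 * exercise failure paths for non-sleeping allocations.  Constructed caches
 * are never zeroed: the ctor owns initialisation, so __GFP_ZERO applies
 * only to ctor-less caches.
 */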
void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

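/*
 * With an alignment set, objects are always poisoned and freed rather than
 * cached; otherwise up to ten objects are kept on the free list as before.
 */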
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

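/*
 * Same __GFP_DIRECT_RECLAIM gate as kmem_cache_alloc(); with __GFP_ZERO
 * this behaves like kzalloc().
 */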
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}

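/* NULL is ignored, matching the kernel's kfree(). */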
void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

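/* name and flags remain unused; align is stored and honoured by kmem_cache_alloc(). */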
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
	unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
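
/*
 * A minimal usage sketch (hypothetical, not part of the original shim),
 * assuming the harness's linux/gfp.h defines GFP_KERNEL with
 * __GFP_DIRECT_RECLAIM set.  The SLAB_SHIM_DEMO guard is made up here so
 * the sketch never interferes with a real build.
 */
#ifdef SLAB_SHIM_DEMO
static void slab_shim_demo(void)
{
	struct kmem_cache *cache;
	struct radix_tree_node *node;

	cache = kmem_cache_create("demo", sizeof(struct radix_tree_node),
				  0, 0, NULL);
	node = kmem_cache_alloc(cache, GFP_KERNEL);	/* bumps nr_allocated */
	assert(node);
	kmem_cache_free(cache, node);	/* cached on the free list, not freed */
	assert(nr_allocated == 0);	/* the leak counter is back to zero */
}
#endif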