/* Userspace shims for kernel slab/mempool APIs (radix-tree test harness). */
1#include <stdlib.h>
2#include <string.h>
3#include <malloc.h>
4#include <pthread.h>
5#include <unistd.h>
6#include <assert.h>
7
8#include <linux/mempool.h>
9#include <linux/poison.h>
10#include <linux/slab.h>
11#include <linux/radix-tree.h>
12#include <urcu/uatomic.h>
13
/* Count of outstanding allocations made through these shims; updated
 * atomically via uatomic_inc/uatomic_dec so tests can detect leaks. */
int nr_allocated;
/* Userspace stand-in for the kernel's preemption counter.
 * NOTE(review): never modified in this file — presumably read/written by
 * the emulated kernel headers; confirm before removing. */
int preempt_count;
16
/* Userspace emulation of a slab cache, backed by malloc/free plus a
 * small freelist of recycled objects. */
struct kmem_cache {
	pthread_mutex_t lock;	/* protects nr_objs and objs */
	int size;		/* object size, as passed to kmem_cache_create() */
	int nr_objs;		/* number of objects currently on the freelist */
	void *objs;		/* freelist head, chained via radix_tree_node.private_data */
	void (*ctor)(void *);	/* optional constructor run on newly malloc'd objects */
};
24
/*
 * Allocate an element from @pool by delegating to its alloc callback.
 * gfp_mask is forwarded unchanged; no reserve handling in this shim.
 */
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
	return pool->alloc(gfp_mask, pool->data);
}
29
/*
 * Return @element to @pool by delegating to its free callback.
 */
void mempool_free(void *element, mempool_t *pool)
{
	pool->free(element, pool->data);
}
34
35mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
36 mempool_free_t *free_fn, void *pool_data)
37{
38 mempool_t *ret = malloc(sizeof(*ret));
39
40 ret->alloc = alloc_fn;
41 ret->free = free_fn;
42 ret->data = pool_data;
43 return ret;
44}
45
46void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
47{
48 struct radix_tree_node *node;
49
50 if (flags & __GFP_NOWARN)
51 return NULL;
52
53 pthread_mutex_lock(&cachep->lock);
54 if (cachep->nr_objs) {
55 cachep->nr_objs--;
56 node = cachep->objs;
57 cachep->objs = node->private_data;
58 pthread_mutex_unlock(&cachep->lock);
59 node->private_data = NULL;
60 } else {
61 pthread_mutex_unlock(&cachep->lock);
62 node = malloc(cachep->size);
63 if (cachep->ctor)
64 cachep->ctor(node);
65 }
66
67 uatomic_inc(&nr_allocated);
68 return node;
69}
70
/*
 * Return @objp to @cachep.  A small number of objects (the freelist is
 * capped once nr_objs exceeds 10) are kept for reuse; beyond that the
 * object is poisoned and released to the system allocator.
 * @objp must not be NULL.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		/* Freelist full: poison so use-after-free is detectable,
		 * then really free. */
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		/* Push onto the freelist, chained through private_data. */
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}
87
88void *kmalloc(size_t size, gfp_t gfp)
89{
90 void *ret = malloc(size);
91 uatomic_inc(&nr_allocated);
92 return ret;
93}
94
95void kfree(void *p)
96{
97 if (!p)
98 return;
99 uatomic_dec(&nr_allocated);
100 free(p);
101}
102
103struct kmem_cache *
104kmem_cache_create(const char *name, size_t size, size_t offset,
105 unsigned long flags, void (*ctor)(void *))
106{
107 struct kmem_cache *ret = malloc(sizeof(*ret));
108
109 pthread_mutex_init(&ret->lock, NULL);
110 ret->size = size;
111 ret->nr_objs = 0;
112 ret->objs = NULL;
113 ret->ctor = ctor;
114 return ret;
115}
1#include <stdlib.h>
2#include <string.h>
3#include <malloc.h>
4#include <unistd.h>
5#include <assert.h>
6
7#include <linux/mempool.h>
8#include <linux/slab.h>
9#include <urcu/uatomic.h>
10
/* Count of outstanding allocations made through these shims; updated
 * atomically so tests can detect leaks. */
int nr_allocated;
12
/*
 * Allocate an element from @pool by delegating to its alloc callback.
 * gfp_mask is forwarded unchanged; no reserve handling in this shim.
 */
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
	return pool->alloc(gfp_mask, pool->data);
}
17
/*
 * Return @element to @pool by delegating to its free callback.
 */
void mempool_free(void *element, mempool_t *pool)
{
	pool->free(element, pool->data);
}
22
23mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
24 mempool_free_t *free_fn, void *pool_data)
25{
26 mempool_t *ret = malloc(sizeof(*ret));
27
28 ret->alloc = alloc_fn;
29 ret->free = free_fn;
30 ret->data = pool_data;
31 return ret;
32}
33
34void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
35{
36 void *ret = malloc(cachep->size);
37 if (cachep->ctor)
38 cachep->ctor(ret);
39 uatomic_inc(&nr_allocated);
40 return ret;
41}
42
43void kmem_cache_free(struct kmem_cache *cachep, void *objp)
44{
45 assert(objp);
46 uatomic_dec(&nr_allocated);
47 memset(objp, 0, cachep->size);
48 free(objp);
49}
50
51struct kmem_cache *
52kmem_cache_create(const char *name, size_t size, size_t offset,
53 unsigned long flags, void (*ctor)(void *))
54{
55 struct kmem_cache *ret = malloc(sizeof(*ret));
56
57 ret->size = size;
58 ret->ctor = ctor;
59 return ret;
60}