#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/mempool.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

/* Allocation counters shared with the test harness. */
int nr_allocated;
int preempt_count;

/* Userspace stand-in for a kernel slab cache, guarded by a pthread mutex. */
struct kmem_cache {
	pthread_mutex_t lock;
	int size;		/* object size in bytes */
	int nr_objs;		/* objects currently on the freelist */
	void *objs;		/* singly linked freelist of cached objects */
	void (*ctor)(void *);	/* constructor run on freshly malloc'ed objects */
};

/* Minimal mempool shim: delegate directly to the pool's callbacks. */
void *mempool_alloc(mempool_t *pool, int gfp_mask)
{
	return pool->alloc(gfp_mask, pool->data);
}

void mempool_free(void *element, mempool_t *pool)
{
	pool->free(element, pool->data);
}

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data)
{
	mempool_t *ret = malloc(sizeof(*ret));

	ret->alloc = alloc_fn;
	ret->free = free_fn;
	ret->data = pool_data;
	return ret;
}

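/*
 * Illustrative sketch, not part of the original file: one way a test could
 * drive the mempool shim above. The demo_* helpers are hypothetical, and
 * their signatures assume the harness's mempool_alloc_t/mempool_free_t
 * typedefs match the (gfp_mask, pool_data) calls made by mempool_alloc()
 * and mempool_free() above.
 */
static void *demo_pool_alloc(int gfp_mask, void *pool_data)
{
	(void)gfp_mask;
	(void)pool_data;
	return malloc(sizeof(struct radix_tree_node));
}

static void demo_pool_free(void *element, void *pool_data)
{
	(void)pool_data;
	free(element);
}

static void demo_mempool(void)
{
	/* min_nr is accepted but ignored by the stub above. */
	mempool_t *pool = mempool_create(0, demo_pool_alloc, demo_pool_free, NULL);
	void *obj = mempool_alloc(pool, 0);

	mempool_free(obj, pool);
	free(pool);	/* the stub has no matching mempool_destroy() */
}
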
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	/* __GFP_NOWARN doubles as the failure-injection trigger here. */
	if (flags & __GFP_NOWARN)
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Pop a cached node; the freelist is chained through ->private_data. */
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->private_data;
		pthread_mutex_unlock(&cachep->lock);
		node->private_data = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	return node;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		/* Freelist is full enough; poison the object and release it. */
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		/* Cache the node on the freelist for the next allocation. */
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->private_data = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret = malloc(size);
	uatomic_inc(&nr_allocated);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	free(p);
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
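
/*
 * Illustrative sketch, not part of the original file: exercising the
 * kmem_cache shim above, assuming no other allocations are live when it
 * runs.
 */
static void demo_kmem_cache(void)
{
	struct kmem_cache *cache = kmem_cache_create("demo",
			sizeof(struct radix_tree_node), 0, 0, NULL);
	struct radix_tree_node *node = kmem_cache_alloc(cache, 0);

	assert(node);
	kmem_cache_free(cache, node);	/* node goes onto the cache's freelist */
	assert(nr_allocated == 0);
}

/* A second revision of the same shim follows, adding GFP-based failure
 * injection and verbose allocation logging. */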
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>
#include <stdio.h>	/* for the verbose-mode printf() calls below */

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;

struct kmem_cache {
	pthread_mutex_t lock;
	int size;		/* object size in bytes */
	int nr_objs;		/* objects currently on the freelist */
	void *objs;		/* singly linked freelist of cached objects */
	void (*ctor)(void *);	/* constructor run on freshly malloc'ed objects */
};

void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	/* Fail allocations that may not sleep, simulating atomic-context pressure. */
	if (!(flags & __GFP_DIRECT_RECLAIM))
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Pop a cached node; the freelist is chained through ->parent. */
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", node);
	return node;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		/* Freelist is full enough; poison the object and release it. */
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		/* Cache the node on the freelist for the next allocation. */
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	/* Same failure injection as kmem_cache_alloc(). */
	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);	/* honour kzalloc()-style requests */
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
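
/*
 * Illustrative sketch, not part of the original file: the revised shim
 * fails any allocation whose gfp mask lacks __GFP_DIRECT_RECLAIM, letting
 * tests exercise allocation-failure paths. Assumes the harness's
 * <linux/gfp.h> defines GFP_KERNEL (which includes __GFP_DIRECT_RECLAIM)
 * and GFP_ATOMIC (which does not).
 */
static void demo_failure_injection(void)
{
	void *p = kmalloc(64, GFP_KERNEL);	/* may sleep: succeeds */
	void *q = kmalloc(64, GFP_ATOMIC);	/* may not: shim returns NULL */

	assert(p);
	assert(!q);
	kfree(p);
	kfree(q);	/* kfree(NULL) is a no-op, as in the kernel */
}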