v6.2
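The listing below appears to be the userspace kmem_cache emulation used by the kernel's radix-tree test harness (tools/testing/radix-tree/linux.c) as it stood in v6.2; the v6.9.4 copy of the same file follows further down for comparison.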
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int test_verbose;

struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
};

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (cachep->non_kernel < size)
			return 0;

		cachep->non_kernel -= size;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size * size);
			} else {
				p[i] = malloc(cachep->size * size);
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators with an unaligned kmem_cache so that the
	 * bulk alloc/free paths reuse objects from the cache's freelist.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
v6.9.4
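Compared with the v6.2 copy above, this version splits the freelist handling out of kmem_cache_free_locked() into __kmem_cache_free_locked(), and reworks kmem_cache_alloc_bulk(): the non_kernel budget is charged per object inside the allocation loops rather than up front, each object is allocated with cachep->size bytes (the v6.2 code asked posix_memalign()/malloc() for cachep->size * size), a failed malloc() ends the loop, and a partially satisfied request is unwound through __kmem_cache_free_locked() so the call returns either the full size or 0.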
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int test_verbose;

struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
};

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size);
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators with an unaligned kmem_cache so that the
	 * bulk alloc/free paths reuse objects from the cache's freelist.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
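As a rough illustration of the v6.9.4 bulk path, here is a minimal sketch; it is not part of the kernel file, and the helper name example_bulk_non_kernel, the cache name and the object size are made up. It exercises the non_kernel budget: without __GFP_DIRECT_RECLAIM, kmem_cache_alloc_bulk() charges the budget set by kmem_cache_set_non_kernel() for each object and, once the budget runs out, frees the partial allocation and reports failure by returning 0.

/*
 * Minimal sketch, not part of the kernel source: exercises the v6.9.4
 * non_kernel budget handling in kmem_cache_alloc_bulk(). The function
 * name and cache parameters are illustrative only.
 */
void example_bulk_non_kernel(void)
{
	void *list[4];
	struct kmem_cache *cache;

	cache = kmem_cache_create("example", 64, 0, SLAB_PANIC, NULL);

	/* Allow only two allocations that lack __GFP_DIRECT_RECLAIM. */
	kmem_cache_set_non_kernel(cache, 2);

	/*
	 * Asking for four objects exhausts the budget mid-loop; the partial
	 * allocation is rolled back via __kmem_cache_free_locked() and the
	 * call returns 0.
	 */
	assert(kmem_cache_alloc_bulk(cache, 0, 4, list) == 0);

	/* With __GFP_DIRECT_RECLAIM the budget is not consulted. */
	assert(kmem_cache_alloc_bulk(cache, __GFP_DIRECT_RECLAIM, 4, list) == 4);
	kmem_cache_free_bulk(cache, 4, list);
}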