v4.17
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;

struct kmem_cache {
	pthread_mutex_t lock;
	int size;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct radix_tree_node *node;

	if (!(flags & __GFP_DIRECT_RECLAIM))
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		cachep->nr_objs--;
		node = cachep->objs;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		node = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(node);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", node);
	return node;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
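For context, a minimal usage sketch follows. It is not part of the file above; it assumes the radix-tree test suite's shim headers supply GFP_KERNEL, __GFP_DIRECT_RECLAIM and struct radix_tree_node. It illustrates the shim's behaviour: allocations without __GFP_DIRECT_RECLAIM fail by design, successful allocations bump nr_allocated, and a freed object is parked on the cache's freelist until the list holds more than ten objects.

/* Hypothetical test snippet; relies on the shim headers named above. */
#include <stdio.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>

extern int nr_allocated;

int main(void)
{
	struct kmem_cache *cache;
	void *obj;

	/* No constructor: objects come straight from malloc(), uninitialised. */
	cache = kmem_cache_create("test_nodes", sizeof(struct radix_tree_node),
				  0, 0, NULL);

	obj = kmem_cache_alloc(cache, 0);		/* NULL: no __GFP_DIRECT_RECLAIM */
	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* freelist empty, so malloc() */
	kmem_cache_free(cache, obj);			/* parked on the freelist */

	printf("nr_allocated = %d\n", nr_allocated);	/* back to 0 */
	return 0;
}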
v5.14.15
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;

struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}

void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
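The later version adds an align field: an aligned cache allocates with posix_memalign(), honours __GFP_ZERO when there is no constructor, and never parks freed objects on the freelist. A hypothetical snippet exercising that path, under the same shim-header assumptions as the earlier sketch:

/* Hypothetical test snippet; relies on the shim headers named above. */
#include <stdio.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>

extern int nr_allocated;

int main(void)
{
	/* 64-byte aligned cache with no constructor. */
	struct kmem_cache *cache = kmem_cache_create("aligned_nodes",
			sizeof(struct radix_tree_node), 64, 0, NULL);

	/* posix_memalign() path; __GFP_ZERO zeroes the object because there is no ctor. */
	void *obj = kmem_cache_alloc(cache, GFP_KERNEL | __GFP_ZERO);

	/* Aligned caches bypass the freelist: poisoned with POISON_FREE and freed. */
	kmem_cache_free(cache, obj);

	printf("nr_allocated = %d\n", nr_allocated);
	return 0;
}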