// SPDX-License-Identifier: GPL-2.0

#include <linux/objpool.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/log2.h>

/*
 * objpool: ring-array based lockless MPMC/FIFO queues
 *
 * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
 */

/* initialize percpu objpool_slot */
static int
objpool_init_percpu_slot(struct objpool_head *pool,
			 struct objpool_slot *slot,
			 int nodes, void *context,
			 objpool_init_obj_cb objinit)
{
	void *obj = (void *)&slot->entries[pool->capacity];
	int i;

	/* initialize elements of percpu objpool_slot */
	slot->mask = pool->capacity - 1;

	for (i = 0; i < nodes; i++) {
		if (objinit) {
			int rc = objinit(obj, context);
			if (rc)
				return rc;
		}
		slot->entries[slot->tail & slot->mask] = obj;
		obj = obj + pool->obj_size;
		slot->tail++;
		slot->last = slot->tail;
		pool->nr_objs++;
	}

	return 0;
}

/* allocate and initialize percpu slots */
static int
objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
			  void *context, objpool_init_obj_cb objinit)
{
	int i, cpu_count = 0;

	for (i = 0; i < nr_cpu_ids; i++) {

		struct objpool_slot *slot;
		int nodes, size, rc;

		/* skip the cpu node which could never be present */
		if (!cpu_possible(i))
			continue;

		/* compute how many objects to be allocated with this slot */
		nodes = nr_objs / pool->nr_possible_cpus;
		if (cpu_count < (nr_objs % pool->nr_possible_cpus))
			nodes++;
		cpu_count++;

		size = struct_size(slot, entries, pool->capacity) +
			pool->obj_size * nodes;

		/*
		 * here we allocate the percpu slot & objs together in a single
		 * allocation to make it more compact, taking advantage of
		 * warm caches and TLB hits. by default vmalloc is used to
		 * reduce the pressure on the kernel slab allocator. note that
		 * the minimal size of a vmalloc allocation is one page, since
		 * vmalloc always aligns the requested size to page size.
		 * but if vmalloc fails or is not available (e.g. GFP_ATOMIC),
		 * allocate the percpu slot with kmalloc.
		 */
		slot = NULL;

		if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
			slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
				cpu_to_node(i), __builtin_return_address(0));

		if (!slot) {
			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
			if (!slot)
				return -ENOMEM;
		}
		memset(slot, 0, size);
		pool->cpu_slots[i] = slot;

		/* initialize the objpool_slot of cpu node i */
		rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
		if (rc)
			return rc;
	}

	return 0;
}

/* cleanup all percpu slots of the object pool */
static void objpool_fini_percpu_slots(struct objpool_head *pool)
{
	int i;

	if (!pool->cpu_slots)
		return;

	for (i = 0; i < nr_cpu_ids; i++)
		kvfree(pool->cpu_slots[i]);
	kfree(pool->cpu_slots);
}

/* initialize object pool and pre-allocate objects */
int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
		gfp_t gfp, void *context, objpool_init_obj_cb objinit,
		objpool_fini_cb release)
{
	int rc, capacity, slot_size;

	/* check input parameters */
	if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX ||
	    object_size <= 0 || object_size > OBJPOOL_OBJECT_SIZE_MAX)
		return -EINVAL;

	/* align up to unsigned long size */
	object_size = ALIGN(object_size, sizeof(long));

	/* calculate capacity of percpu objpool_slot */
	capacity = roundup_pow_of_two(nr_objs);
	if (!capacity)
		return -EINVAL;

	/* initialize the objpool head */
	memset(pool, 0, sizeof(struct objpool_head));
	pool->nr_possible_cpus = num_possible_cpus();
	pool->obj_size = object_size;
	pool->capacity = capacity;
	pool->gfp = gfp & ~__GFP_ZERO;
	pool->context = context;
	pool->release = release;
	slot_size = nr_cpu_ids * sizeof(struct objpool_slot);
	pool->cpu_slots = kzalloc(slot_size, pool->gfp);
	if (!pool->cpu_slots)
		return -ENOMEM;

	/* initialize per-cpu slots */
	rc = objpool_init_percpu_slots(pool, nr_objs, context, objinit);
	if (rc)
		objpool_fini_percpu_slots(pool);
	else
		refcount_set(&pool->ref, pool->nr_objs + 1);

	return rc;
}
EXPORT_SYMBOL_GPL(objpool_init);

/* forcibly release the whole objpool */
void objpool_free(struct objpool_head *pool)
{
	if (!pool->cpu_slots)
		return;

	/* release percpu slots */
	objpool_fini_percpu_slots(pool);

	/* call user's cleanup callback if provided */
	if (pool->release)
		pool->release(pool, pool->context);
}
EXPORT_SYMBOL_GPL(objpool_free);

/* drop the allocated object rather than reclaiming it to the objpool */
int objpool_drop(void *obj, struct objpool_head *pool)
{
	if (!obj || !pool)
		return -EINVAL;

	if (refcount_dec_and_test(&pool->ref)) {
		objpool_free(pool);
		return 0;
	}

	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(objpool_drop);

/* drop unused objects and deref the objpool so it can be released */
void objpool_fini(struct objpool_head *pool)
{
	int count = 1; /* extra ref for objpool itself */

	/* drop all remaining objects from the objpool */
	while (objpool_pop(pool))
		count++;

	if (refcount_sub_and_test(count, &pool->ref))
		objpool_free(pool);
}
EXPORT_SYMBOL_GPL(objpool_fini);
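
The sketch below illustrates the intended lifecycle of this API: pre-allocate a pool with objpool_init(), pop/push objects on the hot path, and tear it down with objpool_fini(). It relies on the objpool_pop()/objpool_push() helpers declared in <linux/objpool.h>; the names my_obj, my_obj_init, my_pool, MY_NR_OBJS and the my_pool_* functions are hypothetical and only serve as illustration, not as code from this file.

/*
 * Minimal usage sketch for the objpool API (illustration only).
 * my_obj, my_obj_init, my_pool and MY_NR_OBJS are made-up names.
 */
#include <linux/objpool.h>
#include <linux/gfp.h>

#define MY_NR_OBJS	128

struct my_obj {
	unsigned long cookie;
};

static struct objpool_head my_pool;

/* called once for each pre-allocated object during objpool_init() */
static int my_obj_init(void *obj, void *context)
{
	struct my_obj *mo = obj;

	mo->cookie = 0;
	return 0;
}

static int my_pool_setup(void)
{
	/* pre-allocate MY_NR_OBJS objects, spread over the possible CPUs */
	return objpool_init(&my_pool, MY_NR_OBJS, sizeof(struct my_obj),
			    GFP_KERNEL, NULL, my_obj_init, NULL);
}

static void my_pool_use(void)
{
	struct my_obj *mo = objpool_pop(&my_pool);

	if (!mo)
		return;			/* pool temporarily empty */

	/* ... use the object ... */

	objpool_push(mo, &my_pool);	/* hand it back to the pool */
}

static void my_pool_teardown(void)
{
	/* pops the remaining objects and frees the pool once unreferenced */
	objpool_fini(&my_pool);
}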