v6.2
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
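/*
 * Editor's illustration, not part of the kernel source: since
 * idr_alloc_u32() stores the new ID through @nextid before publishing
 * @ptr, the ID may live inside the object itself without a concurrent
 * RCU lookup ever seeing it uninitialised.  'struct foo', 'foo_idr'
 * and 'foo_lock' are hypothetical caller-side names.
 *
 *	static DEFINE_IDR(foo_idr);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	struct foo {
 *		u32 id;
 *	};
 *
 *	static int foo_register(struct foo *foo)
 *	{
 *		int err;
 *
 *		foo->id = 0;
 *		spin_lock(&foo_lock);
 *		err = idr_alloc_u32(&foo_idr, foo, &foo->id, U32_MAX,
 *				    GFP_ATOMIC);
 *		spin_unlock(&foo_lock);
 *		return err;
 *	}
 */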

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
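/*
 * Editor's illustration, not part of the kernel source: a typical
 * allocate/free pair under a caller-provided mutex.  'foo_idr' and
 * 'foo_mutex' are hypothetical names; @end == 0 requests the full
 * [1, INT_MAX] range.
 *
 *	static DEFINE_IDR(foo_idr);
 *	static DEFINE_MUTEX(foo_mutex);
 *
 *	int foo_add(struct foo *foo)
 *	{
 *		int id;
 *
 *		mutex_lock(&foo_mutex);
 *		id = idr_alloc(&foo_idr, foo, 1, 0, GFP_KERNEL);
 *		mutex_unlock(&foo_mutex);
 *		return id;
 *	}
 *
 *	void foo_del(struct foo *foo, int id)
 *	{
 *		mutex_lock(&foo_mutex);
 *		idr_remove(&foo_idr, id);
 *		mutex_unlock(&foo_mutex);
 *	}
 */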

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
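/*
 * Editor's illustration, not part of the kernel source: cyclic
 * allocation suits identifiers whose quick reuse could confuse other
 * parties, such as session or transaction numbers.  'session_idr' is
 * a hypothetical name; the call is made under the caller's lock.
 *
 *	id = idr_alloc_cyclic(&session_idr, session, 1, 0, GFP_KERNEL);
 *
 * Successive calls hand out 1, 2, 3, ... and only wrap back to @start
 * once the top of the range has been passed, rather than immediately
 * reusing freed IDs.
 */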

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
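/*
 * Editor's illustration, not part of the kernel source: a lookup under
 * RCU.  The object must not be freed until an RCU grace period has
 * elapsed after idr_remove().  'foo_idr' is a hypothetical name.
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo)
 *		... use foo ...
 *	rcu_read_unlock();
 */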

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
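/*
 * Editor's illustration, not part of the kernel source: a callback
 * that counts the entries.  Returning non-zero from the callback would
 * stop the walk early.  'foo_idr' is a hypothetical name.
 *
 *	static int foo_count_cb(int id, void *p, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&foo_idr, foo_count_cb, &count);
 */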

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
		entry = rcu_dereference_raw(*slot);
		if (!entry)
			continue;
		if (!xa_is_internal(entry))
			break;
		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
			break;
		slot = radix_tree_iter_retry(&iter);
	}
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
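/*
 * Editor's illustration, not part of the kernel source: a manual
 * iteration loop.  Note the caller advances the index itself after
 * each entry, as the kernel-doc above requires.  'foo_idr' is a
 * hypothetical name.
 *
 *	unsigned long id = 0;
 *	void *entry;
 *
 *	while ((entry = idr_get_next_ul(&foo_idr, &id)) != NULL) {
 *		... process entry with ID 'id' ...
 *		id++;
 *	}
 *
 * The idr_for_each_entry_ul() helper in <linux/idr.h> wraps this
 * pattern.
 */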

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	unsigned long id = *nextid;
	void *entry = idr_get_next_ul(idr, &id);

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;
	*nextid = id;
	return entry;
}
EXPORT_SYMBOL(idr_get_next);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
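/*
 * Editor's illustration, not part of the kernel source: swapping in a
 * new object for a live ID, e.g. after reconfiguration.  The old
 * pointer is returned so the caller can dispose of it once any RCU
 * readers are done.  'foo_idr' and 'new_foo' are hypothetical names.
 *
 *	old = idr_replace(&foo_idr, new_foo, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	... free 'old' after a grace period, e.g. with kfree_rcu() ...
 */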

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking.  It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
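/*
 * Editor's illustration, not part of the kernel source: the typical
 * IDA pattern described above, e.g. for numbering device instances.
 * 'foo_ida' is a hypothetical name.
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	int nr = ida_alloc(&foo_ida, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_free(&foo_ida, nr);
 *
 * No external locking is needed; the IDA serialises internally.
 */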

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA.  So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries.  We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme.  I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do exceed 1024 entries also use more than the 8192 IDs
 * that the 512 bytes would provide.
 *
 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		if (bit < BITS_PER_XA_VALUE) {
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	kfree(alloc);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
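/*
 * Editor's illustration, not part of the kernel source: bounding the
 * range, e.g. to hand out a minor number between 1 and 255 inclusive.
 * 'foo_ida' is a hypothetical name.
 *
 *	minor = ida_alloc_range(&foo_ida, 1, 255, GFP_KERNEL);
 *	if (minor < 0)
 *		return minor;
 *
 * Once all 255 values are in use, further calls return -ENOSPC.
 */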

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	if ((int)id < 0)
		return;

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		unsigned long v = xa_to_value(bitmap);
		if (bit >= BITS_PER_XA_VALUE)
			goto err;
		if (!(v & (1UL << bit)))
			goto err;
		v &= ~(1UL << bit);
		if (!v)
			goto delete;
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!test_bit(bit, bitmap->bitmap))
			goto err;
		__clear_bit(bit, bitmap->bitmap);
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
 err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_destroy(struct ida *ida)
{
	XA_STATE(xas, &ida->xa, 0);
	struct ida_bitmap *bitmap;
	unsigned long flags;

	xas_lock_irqsave(&xas, flags);
	xas_for_each(&xas, bitmap, ULONG_MAX) {
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		xas_store(&xas, NULL);
	}
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct xarray *xa = &ida->xa;
	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
				xa->xa_flags >> ROOT_TAG_SHIFT);
	ida_dump_entry(xa->xa_head, 0);
}
#endif
v3.15
/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever; we treat it as a (void *)) with that id.  You can pass this
 * id to a user to pass back at a later time.  You then pass that id to
 * this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, and so
 * you don't need to be too concerned about locking and conflicts with
 * the slab allocator.
 */

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)

/* Leave the possibility of an incomplete final layer */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)

static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}

/*
 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
 * so on.
 */
static int idr_layer_prefix_mask(int layer)
{
	return ~idr_max(layer + 1);
}

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/*
	 * Try to allocate directly from kmem_cache.  We want to try this
	 * before preload buffer; otherwise, non-preloading idr_alloc()
	 * users will end up taking advantage of preloading ones.  As the
	 * following is allowed to fail for preloaded cases, suppress
	 * warning this time.
	 */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
		if (new)
			return new;
	}

	/*
	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
	 * that memory allocation failure warning is printed as intended.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
	if (idr->hint && idr->hint == p)
		RCU_INIT_POINTER(idr->hint, NULL);
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (bitmap_full(p->bitmap, IDR_SIZE)) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), p->bitmap);
	}
}

static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			new->prefix = id & idr_layer_prefix_mask(new->layer);
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			WARN_ON_ONCE(p->prefix);
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->count = 0;
				bitmap_clear(new->bitmap, 0, IDR_SIZE);
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		new->prefix = id & idr_layer_prefix_mask(new->layer);
		if (bitmap_full(p->bitmap, IDR_SIZE))
			__set_bit(0, new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return(v);
}

/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(struct idr *idr, void *ptr, int id,
			  struct idr_layer **pa)
{
	/* update hint used for lookup, cleared from free_layer() */
	rcu_assign_pointer(idr->hint, pa[0]);

	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}


/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);

/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(idr, ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Essentially the same as idr_alloc, but prefers to allocate progressively
 * higher ids if it can. If the "cur" counter wraps, then it will start again
 * at the "start" end of the range and allocate one that has already been used.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
			gfp_t gfp_mask)
{
	int id;

	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
	if (id == -ENOSPC)
		id = idr_alloc(idr, ptr, start, end, gfp_mask);

	if (likely(id >= 0))
		idr->cur = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

static void idr_remove_warning(int id)
{
	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, p->bitmap))) {
		__clear_bit(n, p->bitmap);
		RCU_INIT_POINTER(p->ary[n], NULL);
		to_free = NULL;
		while(*paa && ! --((**paa)->count)){
			if (to_free)
				free_layer(idp, to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(idp, to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	if (id < 0)
		return;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->count = 0;
		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
		free_layer(idp, to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

static void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	RCU_INIT_POINTER(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(idp, p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

void *idr_find_slowpath(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	if (id < 0)
		return NULL;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find_slowpath);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object by id, starting from the given id
 * @idp: idr handle
 * @nextidp:  pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to the given id.  After the lookup, *@nextidp is
 * updated for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

static int idr_has_entry(int id, void *p, void *data)
{
	return 1;
}

bool idr_is_empty(struct idr *idp)
{
	return !idr_for_each(idp, idr_has_entry, NULL);
}
EXPORT_SYMBOL(idr_is_empty);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!__idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
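/*
 * Editor's illustration, not part of the kernel source: the retry loop
 * the kernel-doc above describes, with 'my_ida' and 'my_lock' as
 * hypothetical caller-side names.
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 * On success 'id' holds the new ID; otherwise 'ret' is -ENOSPC.
 */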

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:		ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
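/*
 * Editor's illustration, not part of the kernel source: the simple
 * interface hides the pre-get/retry dance above behind an internal
 * lock.  'my_ida' is a hypothetical name; @end == 0 requests the full
 * range.
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */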

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);